text (stringlengths 78–104k) | score (float64, 0–0.18)
---|---|
def gnu_getopt(args, shortopts, longopts=[]):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if type('') == type(longopts):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
all_options_first = False
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args | 0.000693 |
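A minimal usage sketch of the GNU-style scanning described above, using the standard-library getopt module (which provides this same function); the argument list is illustrative.
import getopt
argv = ['input.txt', '-v', '--output', 'out.txt', 'extra']
opts, remaining = getopt.gnu_getopt(argv, 'v', ['output='])
print(opts)       # [('-v', ''), ('--output', 'out.txt')]
print(remaining)  # ['input.txt', 'extra'] -- non-option arguments may be intermixed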
def recursively_collect_orders(
name, ctx, all_inputs, orders=None, blacklist=None
):
'''For each possible recipe ordering, try to add the new recipe name
to that order. Recursively do the same thing with all the
dependencies of each recipe.
'''
name = name.lower()
if orders is None:
orders = []
if blacklist is None:
blacklist = set()
try:
recipe = Recipe.get_recipe(name, ctx)
dependencies = get_dependency_tuple_list_for_recipe(
recipe, blacklist=blacklist
)
# handle opt_depends: these impose requirements on the build
# order only if already present in the list of recipes to build
dependencies.extend(fix_deplist(
[[d] for d in recipe.get_opt_depends_in_list(all_inputs)
if d.lower() not in blacklist]
))
if recipe.conflicts is None:
conflicts = []
else:
conflicts = [dep.lower() for dep in recipe.conflicts]
except ValueError:
# The recipe does not exist, so we assume it can be installed
# via pip with no extra dependencies
dependencies = []
conflicts = []
new_orders = []
# for each existing recipe order, see if we can add the new recipe name
for order in orders:
if name in order:
new_orders.append(deepcopy(order))
continue
if order.conflicts():
continue
if any([conflict in order for conflict in conflicts]):
continue
for dependency_set in product(*dependencies):
new_order = deepcopy(order)
new_order[name] = set(dependency_set)
dependency_new_orders = [new_order]
for dependency in dependency_set:
dependency_new_orders = recursively_collect_orders(
dependency, ctx, all_inputs, dependency_new_orders,
blacklist=blacklist
)
new_orders.extend(dependency_new_orders)
return new_orders | 0.000484 |
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None):
"""Makes multiple GETs to an OpenDNS endpoint.
Args:
cache_api_name: string api_name for caching
fmt_url_path: format string for building URL paths
url_params: An enumerable of strings used in building URLs
query_params: None / dict / list of dicts containing query params
Returns:
A dict of {url_param: api_result}
"""
all_responses = {}
if self._cache:
all_responses = self._cache.bulk_lookup(cache_api_name, url_params)
url_params = [key for key in url_params if key not in all_responses.keys()]
if len(url_params):
urls = self._to_urls(fmt_url_path, url_params)
responses = self._requests.multi_get(urls, query_params)
for url_param, response in zip(url_params, responses):
if self._cache:
self._cache.cache_value(cache_api_name, url_param, response)
all_responses[url_param] = response
return all_responses | 0.004405 |
def _single_learnable_state(state, state_id=0, learnable=True):
"""Returns an initial (maybe learnable) state.
This function does not create any variable scopes, and it should be called
from a Sonnet module. This function also makes sure that all the rows of its
`state` argument have the same value.
Args:
state: initial value of the initial state. It should be a tensor of at least
two dimensions, of which the first dimension corresponds to the
batch_size dimension. All rows of such tensor should have the same value.
state_id: integer that uniquely identifies this state.
learnable: boolean that indicates whether the state is learnable.
Returns:
The initial learnable state `Tensor`.
"""
unpacked_state = tf.unstack(state)
# Assert that all rows have the same values.
assert_rows_equal = [tf.assert_equal(s, unpacked_state[0])
for s in unpacked_state]
# We wish to have all the graph assertions in the graph's critical path,
# so we include them even if the initial state is left unmodified (i.e. when
# the state is not learnable).
# Note: All these assertions will be run every time that data flows
# through the graph. At that point, the control_dependencies context manager
# makes sure that such assertions are run, and will raise an exception if any
# fails.
with tf.control_dependencies(assert_rows_equal):
if not learnable:
return state
else:
state_shape = state.get_shape()
state_shape.assert_is_fully_defined()
state_shape_list = state_shape.as_list()
batch_size, trailing_shape = state_shape_list[0], state_shape_list[1:]
initial_value = tf.reshape(unpacked_state[0], [1] + trailing_shape)
initial_state_variable = tf.get_variable(
"initial_state_%d" % state_id, dtype=initial_value.dtype,
initializer=initial_value)
trailing_size_repeat = [1] * len(trailing_shape)
return tf.tile(initial_state_variable,
tf.constant([batch_size] + trailing_size_repeat)) | 0.011127 |
def send(self, data):
"""
Send data to the server
Parameters
----------
data: object that can be serialized to JSON
"""
answer = None
try:
logging.info("Client conntecting to {server}".format(server=self.server_address))
if six.PY2:
sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
answer = self.sending(sock, data)
sock.close()
else:
with socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM) as sock:
answer = self.sending(sock, data)
except socket.error as e:
logging.error("Client cannot conntect to {server}: {msg}".format(server=self.server_address, msg=e.strerror))
return None
return answer | 0.006881 |
def get_hosted_service_properties(self, service_name, embed_detail=False):
'''
Retrieves system properties for the specified hosted service. These
properties include the service name and service type; the name of the
affinity group to which the service belongs, or its location if it is
not part of an affinity group; and optionally, information on the
service's deployments.
service_name:
Name of the hosted service.
embed_detail:
When True, the management service returns properties for all
deployments of the service, as well as for the service itself.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('embed_detail', embed_detail)
return self._perform_get(
self._get_hosted_service_path(service_name) +
'?embed-detail=' +
_str(embed_detail).lower(),
HostedService) | 0.002058 |
def remove_edge(self, id1, id2):
""" Remove edges between nodes with given id's.
"""
for e in list(self.edges):
if id1 in (e.node1.id, e.node2.id) and \
id2 in (e.node1.id, e.node2.id):
e.node1.links.remove(e.node2)
e.node2.links.remove(e.node1)
self.edges.remove(e) | 0.010444 |
def rect(self):
"""Rectangle containing the annot"""
CheckParent(self)
val = _fitz.Annot_rect(self)
val = Rect(val)
return val | 0.011905 |
def install_dap(name, version='', update=False, update_allpaths=False, first=True,
force=False, nodeps=False, reinstall=False, __ui__=''):
'''Install a dap from dapi
If update is True, it will remove previously installed daps of the same name'''
m, d = _get_metadap_dap(name, version)
if update:
available = d['version']
current = get_installed_version_of(name)
if not current:
raise DapiLocalError('Cannot update not yet installed DAP.')
if dapver.compare(available, current) <= 0:
return []
path, remove_dir = download_dap(name, d=d)
ret = install_dap_from_path(path, update=update, update_allpaths=update_allpaths, first=first,
force=force, nodeps=nodeps, reinstall=reinstall, __ui__=__ui__)
try:
if remove_dir:
shutil.rmtree(os.path.dirname(path))
else:
os.remove(path)
except:
pass
return ret | 0.006104 |
def remove_tweets(self, url):
"""Tries to remove cached tweets."""
try:
del self.cache[url]
self.mark_updated()
return True
except KeyError:
return False | 0.008889 |
def _assert_is_type(name, value, value_type):
"""Assert that a value must be a given type."""
if not isinstance(value, value_type):
if type(value_type) is tuple:
types = ', '.join(t.__name__ for t in value_type)
raise ValueError('{0} must be one of ({1})'.format(name, types))
else:
raise ValueError('{0} must be {1}'
.format(name, value_type.__name__)) | 0.002262 |
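A brief illustration of how this type-assertion helper behaves, assuming the function is in scope; the argument names are made up.
_assert_is_type('width', 3.5, (int, float))      # passes silently
try:
    _assert_is_type('width', 'wide', (int, float))
except ValueError as exc:
    print(exc)                                   # width must be one of (int, float)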
def md5_digest(instr):
'''
Generate an md5 hash of a given string.
'''
return salt.utils.stringutils.to_unicode(
hashlib.md5(salt.utils.stringutils.to_bytes(instr)).hexdigest()
) | 0.004854 |
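For illustration, the same digest can be computed with only the standard library; salt's stringutils helpers above merely handle the bytes/str conversions around hashlib.
import hashlib
digest = hashlib.md5('some string'.encode('utf-8')).hexdigest()
print(digest)  # 32-character hexadecimal string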
def send_request(self):
"""Send request.
[:rfc:`2131#section-3.1`]::
a client retransmitting as described in section 4.1 might retransmit
the DHCPREQUEST message four times, for a total delay of 60 seconds
.. todo::
- The maximum number of retransmitted REQUESTs is per state or in
total?
- Are the retransmitted REQUESTs independent of the retransmitted
DISCOVERs?
"""
assert self.client
if self.current_state == STATE_BOUND:
pkt = self.client.gen_request_unicast()
else:
pkt = self.client.gen_request()
sendp(pkt)
logger.debug('Modifying FSM obj, setting time_sent_request.')
self.time_sent_request = nowutc()
logger.info('DHCPREQUEST of %s on %s to %s port %s',
self.client.iface, self.client.client_ip,
self.client.server_ip, self.client.server_port)
# NOTE: see previous TODO, maybe the MAX_ATTEMPTS_REQUEST needs to be
# calculated per state.
if self.request_attempts < MAX_ATTEMPTS_REQUEST:
self.request_attempts *= 2
logger.debug('Increased request attempts to %s',
self.request_attempts)
if self.current_state == STATE_RENEWING:
timeout_renewing = gen_timeout_request_renew(self.client.lease)
self.set_timeout(self.current_state,
self.timeout_request_renewing,
timeout_renewing)
elif self.current_state == STATE_REBINDING:
timeout_rebinding = gen_timeout_request_rebind(self.client.lease)
self.set_timeout(self.current_state,
self.timeout_request_rebinding,
timeout_rebinding)
else:
timeout_requesting = \
gen_timeout_resend(self.request_attempts)
self.set_timeout(self.current_state,
self.timeout_requesting,
timeout_requesting) | 0.000944 |
def _serialize_into_store(profile, filter=None):
"""
Takes data from app layer and serializes the models into the store.
"""
# ensure that we write and retrieve the counter in one go for consistency
current_id = InstanceIDModel.get_current_instance_and_increment_counter()
with transaction.atomic():
# create Q objects for filtering by prefixes
prefix_condition = None
if filter:
prefix_condition = functools.reduce(lambda x, y: x | y, [Q(_morango_partition__startswith=prefix) for prefix in filter])
# filter through all models with the dirty bit turned on
syncable_dict = _profile_models[profile]
for (_, klass_model) in six.iteritems(syncable_dict):
new_store_records = []
new_rmc_records = []
klass_queryset = klass_model.objects.filter(_morango_dirty_bit=True)
if prefix_condition:
klass_queryset = klass_queryset.filter(prefix_condition)
store_records_dict = Store.objects.in_bulk(id_list=klass_queryset.values_list('id', flat=True))
for app_model in klass_queryset:
try:
store_model = store_records_dict[app_model.id]
# if store record dirty and app record dirty, append store serialized to conflicting data
if store_model.dirty_bit:
store_model.conflicting_serialized_data = store_model.serialized + "\n" + store_model.conflicting_serialized_data
store_model.dirty_bit = False
# set new serialized data on this store model
ser_dict = json.loads(store_model.serialized)
ser_dict.update(app_model.serialize())
store_model.serialized = DjangoJSONEncoder().encode(ser_dict)
# create or update instance and counter on the record max counter for this store model
RecordMaxCounter.objects.update_or_create(defaults={'counter': current_id.counter},
instance_id=current_id.id,
store_model_id=store_model.id)
# update last saved bys for this store model
store_model.last_saved_instance = current_id.id
store_model.last_saved_counter = current_id.counter
# update deleted flags in case it was previously deleted
store_model.deleted = False
store_model.hard_deleted = False
# update this model
store_model.save()
except KeyError:
kwargs = {
'id': app_model.id,
'serialized': DjangoJSONEncoder().encode(app_model.serialize()),
'last_saved_instance': current_id.id,
'last_saved_counter': current_id.counter,
'model_name': app_model.morango_model_name,
'profile': app_model.morango_profile,
'partition': app_model._morango_partition,
'source_id': app_model._morango_source_id,
}
# check if model has FK pointing to it and add the value to a field on the store
self_ref_fk = _self_referential_fk(klass_model)
if self_ref_fk:
self_ref_fk_value = getattr(app_model, self_ref_fk)
kwargs.update({'_self_ref_fk': self_ref_fk_value or ''})
# create store model and record max counter for the app model
new_store_records.append(Store(**kwargs))
new_rmc_records.append(RecordMaxCounter(store_model_id=app_model.id, instance_id=current_id.id, counter=current_id.counter))
# bulk create store and rmc records for this class
Store.objects.bulk_create(new_store_records)
RecordMaxCounter.objects.bulk_create(new_rmc_records)
# set dirty bit to false for all instances of this model
klass_queryset.update(update_dirty_bit_to=False)
# get list of ids of deleted models
deleted_ids = DeletedModels.objects.filter(profile=profile).values_list('id', flat=True)
# update last_saved_bys and deleted flag of all deleted store model instances
deleted_store_records = Store.objects.filter(id__in=deleted_ids)
deleted_store_records.update(dirty_bit=False, deleted=True, last_saved_instance=current_id.id, last_saved_counter=current_id.counter)
# update rmcs counters for deleted models that have our instance id
RecordMaxCounter.objects.filter(instance_id=current_id.id, store_model_id__in=deleted_ids).update(counter=current_id.counter)
# get a list of deleted model ids that don't have an rmc for our instance id
new_rmc_ids = deleted_store_records.exclude(recordmaxcounter__instance_id=current_id.id).values_list("id", flat=True)
# bulk create these new rmcs
RecordMaxCounter.objects.bulk_create([RecordMaxCounter(store_model_id=r_id, instance_id=current_id.id, counter=current_id.counter) for r_id in new_rmc_ids])
# clear deleted models table for this profile
DeletedModels.objects.filter(profile=profile).delete()
# handle logic for hard deletion models
hard_deleted_ids = HardDeletedModels.objects.filter(profile=profile).values_list('id', flat=True)
hard_deleted_store_records = Store.objects.filter(id__in=hard_deleted_ids)
hard_deleted_store_records.update(hard_deleted=True, serialized='{}', conflicting_serialized_data='')
HardDeletedModels.objects.filter(profile=profile).delete()
# update our own database max counters after serialization
if not filter:
DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition="", defaults={'counter': current_id.counter})
else:
for f in filter:
DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition=f, defaults={'counter': current_id.counter}) | 0.004438 |
def define_system_args(subparsers):
"""Append the parser arguments for the 'system' commands"""
system_parser = subparsers.add_parser("system", help='Available commands: \'info\'')
system_subparsers = system_parser.add_subparsers(help='System commands')
# system info arguments
info_parser = system_subparsers.add_parser('info', help='Get system status information')
info_parser.add_argument('--src', required=True, dest='src', metavar='src',
help='The instance name of the target SDC (must match the name in sdc-hosts.yml)')
info_parser.set_defaults(func=info_command) | 0.00638 |
def create_log_group(awsclient, log_group_name):
"""Creates a log group with the specified name.
:param log_group_name: log group name
:return:
"""
client_logs = awsclient.get_client('logs')
response = client_logs.create_log_group(
logGroupName=log_group_name,
) | 0.003333 |
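An equivalent call made with boto3 directly, for illustration; the wrapper above only resolves the CloudWatch Logs client from awsclient, and the group name here is hypothetical.
import boto3
logs = boto3.client('logs')
logs.create_log_group(logGroupName='/my-app/dev')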
def attention_bias_local_block(mesh, block_length, memory_length,
dtype=tf.int32):
"""Bias for attention for local blocks where attention to right is disallowed.
Create the bias matrix by using two separate masks, one for the memory part
which doesn't overlap with the query and second which interacts with the query
and should be disallowed to look to the right of the current query position.
Args:
mesh: a MeshTensorflow object
block_length: a mtf.Dimension
memory_length: a mtf.Dimension
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [block_length, memory_length]
"""
memory_length = mtf.Dimension(memory_length.name, block_length.size)
memory_mask = mtf.zeros(mesh, [block_length, memory_length], dtype=dtype)
mask = mtf.cast(mtf.less(mtf.range(mesh, block_length, dtype=dtype),
mtf.range(mesh, memory_length, dtype=dtype)),
dtype=dtype)
mask = mtf.cast(
mtf.concat([memory_mask, mask], memory_length.name),
dtype=tf.float32) * -1e9
return mask | 0.008264 |
def streamweigths_get(self, session):
'''taobao.wangwang.eservice.streamweigths.get - Get stream-weight settings
Gets the traffic-split (stream) weight settings for the shop of the currently logged-in user.'''
request = TOPRequest('taobao.wangwang.eservice.streamweigths.get')
self.create(self.execute(request, session))
return self.staff_stream_weights | 0.009677 |
def registerDeferred(self, event, d):
"""
Register a defer to be fired at the firing of a specific event.
:param string event: Currently supported values are `connect`. Another
value may be `_dtor` which will register an event to fire when this
object has been completely destroyed.
:param d: The deferred to fire when the event succeeds or fails
:type d: :class:`Deferred`
If this event has already fired, the deferred will be triggered
asynchronously.
Example::
def on_connect(*args):
print("I'm connected")
def on_connect_err(*args):
print("Connection failed")
d = Deferred()
cb.registerDeferred('connect', d)
d.addCallback(on_connect)
d.addErrback(on_connect_err)
:raise: :exc:`ValueError` if the event name is unrecognized
"""
try:
self._evq[event].schedule(d)
except KeyError:
raise ValueError("No such event type", event) | 0.001867 |
def print(root):
# type: (Union[Nonterminal,Terminal,Rule])-> str
"""
Transforms the parsed tree into a string. Expects a tree-like structure.
You can see example output below.
(R)SplitRules26
|--(N)Iterate
| `--(R)SplitRules30
| `--(N)Symb
| `--(R)SplitRules4
| `--(T)e
`--(N)Concat
`--(R)SplitRules27
`--(N)Iterate
`--(R)SplitRules30
`--(N)Symb
`--(R)SplitRules5
`--(T)f
:param root: Root node of the parsed tree.
:return: String representing the parsed tree (ends with newline).
"""
# print the part before the element
def print_before(previous=0, defined=None, is_last=False):
defined = defined or {}
ret = ''
if previous != 0:
for i in range(previous - 1):
# if the column is still active write |
if i in defined:
ret += '| '
# otherwise just print space
else:
ret += ' '
# if the current element is the last child, print `-- instead of |--
ret += '`--' if is_last else '|--'
return ret
# print the terminal
def terminal_traverse(term, callback, previous=0, defined=None, is_last=False):
before = print_before(previous, defined, is_last)
yield before + '(T)' + str(term.s) + '\n'
# print the nonterminal
def nonterminal_traverse(nonterm, callback, previous=0, defined=None, is_last=False):
before = print_before(previous, defined, is_last)
yield before + '(N)' + nonterm.__class__.__name__ + '\n'
yield callback(nonterm.to_rule, previous + 1, defined, True)
# print the rule
def rule_traverse(rule, callback, previous=0, defined=None, is_last=False):
# print the rule name
before = print_before(previous, defined, is_last)
yield before + '(R)' + rule.__class__.__name__ + '\n'
# register new column
defined = defined or set()
defined.add(previous)
# print all children except the last one
for i in range(len(rule.to_symbols) - 1):
yield callback(rule.to_symbols[i], previous + 1, defined, False)
# unregister the column before printing the last child
defined.remove(previous)
yield callback(rule.to_symbols[-1], previous + 1, defined, True)
res = Traversing.traverse_separated(root, rule_traverse, nonterminal_traverse, terminal_traverse)
return str.join("", res) | 0.003176 |
def compute_bayes_cone(preds, starting_value=1.):
"""
Compute 5, 25, 75 and 95 percentiles of cumulative returns, used
for the Bayesian cone.
Parameters
----------
preds : numpy.array
Multiple (simulated) cumulative returns.
starting_value : int (optional)
Have cumulative returns start around this value.
Default = 1.
Returns
-------
dict of percentiles over time
Dictionary mapping percentiles (5, 25, 75, 95) to a
timeseries.
"""
def scoreatpercentile(cum_preds, p):
return [stats.scoreatpercentile(
c, p) for c in cum_preds.T]
cum_preds = np.cumprod(preds + 1, 1) * starting_value
perc = {p: scoreatpercentile(cum_preds, p) for p in (5, 25, 75, 95)}
return perc | 0.001266 |
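A hedged usage sketch, assuming compute_bayes_cone and its numpy/scipy imports (np, stats) are in scope; the simulated returns are made up.
import numpy as np
preds = np.random.normal(0.0005, 0.01, size=(1000, 252))  # 1000 simulated daily-return paths
cone = compute_bayes_cone(preds, starting_value=1.0)
print(sorted(cone))   # [5, 25, 75, 95]
print(len(cone[5]))   # 252 -- one percentile value per time step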
def from_callback(cls, cb, ny=None, nparams=None, dep_transf_cbs=None,
indep_transf_cbs=None, roots_cb=None, **kwargs):
"""
Create an instance from a callback.
Analogous to :func:`SymbolicSys.from_callback`.
Parameters
----------
cb : callable
Signature ``rhs(x, y[:], p[:]) -> f[:]``
ny : int
length of y
nparams : int
length of p
dep_transf_cbs : iterable of pairs callables
callables should have the signature ``f(yi) -> expression`` in yi
indep_transf_cbs : pair of callbacks
callables should have the signature ``f(x) -> expression`` in x
roots_cb : callable
Callback with signature ``roots(x, y[:], p[:], backend=math) -> r[:]``.
Callback should return untransformed roots.
\*\*kwargs :
Keyword arguments passed onto :class:`TransformedSys`.
"""
ny, nparams = _get_ny_nparams_from_kw(ny, nparams, kwargs)
be = Backend(kwargs.pop('backend', None))
x, = be.real_symarray('x', 1)
y = be.real_symarray('y', ny)
p = be.real_symarray('p', nparams)
_y = dict(zip(kwargs['names'], y)) if kwargs.get('dep_by_name', False) else y
_p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
exprs = _ensure_4args(cb)(x, _y, _p, be)
if dep_transf_cbs is not None:
dep_transf = [(fw(yi), bw(yi)) for (fw, bw), yi
in zip(dep_transf_cbs, y)]
else:
dep_transf = None
if indep_transf_cbs is not None:
indep_transf = indep_transf_cbs[0](x), indep_transf_cbs[1](x)
else:
indep_transf = None
if kwargs.get('dep_by_name', False):
exprs = [exprs[k] for k in kwargs['names']]
cls._kwargs_roots_from_roots_cb(roots_cb, kwargs, x, _y, _p, be)
return cls(list(zip(y, exprs)), x, dep_transf,
indep_transf, p, backend=be, **kwargs) | 0.003835 |
def is_command(self, text: str) -> bool:
"""
checks for presence of shebang in the first character of the text
"""
if text[0] in self.shebangs:
return True
return False | 0.00905 |
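A minimal sketch of the shebang check, assuming a class whose shebangs attribute lists the prefix characters that mark a command; the values here are hypothetical.
class Parser:
    shebangs = ('/', '!')
    def is_command(self, text: str) -> bool:
        return text[0] in self.shebangs
print(Parser().is_command('/start'))  # True
print(Parser().is_command('hello'))   # False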
def fav_songs(self):
"""
FIXME: support fetching all favorite songs
"""
if self._fav_songs is None:
songs_data = self._api.user_favorite_songs(self.identifier)
self._fav_songs = []
if not songs_data:
return
for song_data in songs_data:
song = _deserialize(song_data, NestedSongSchema)
self._fav_songs.append(song)
return self._fav_songs | 0.004464 |
def in_clip(self, x, y):
"""Tests whether the given point is inside the area
that would be visible through the current clip,
i.e. the area that would be filled by a :meth:`paint` operation.
See :meth:`clip`, and :meth:`clip_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
*New in cairo 1.10.*
"""
return bool(cairo.cairo_in_clip(self._pointer, x, y)) | 0.003623 |
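A usage sketch with cairocffi (the binding this method appears to come from): clip to a small rectangle and test two points against the clip area; surface size and coordinates are arbitrary.
import cairocffi as cairo
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.rectangle(10, 10, 30, 30)
ctx.clip()
print(ctx.in_clip(20, 20))  # True -- inside the clipped region
print(ctx.in_clip(80, 80))  # False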
def get(ctx):
"""Get info for current project, by project_name, or user/project_name.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
To get current project:
\b
```bash
$ polyaxon project get
```
To get a project by name
\b
```bash
$ polyaxon project get user/project
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
response = PolyaxonClient().project.get_project(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
get_project_details(response) | 0.002538 |
def main_crop():
"""This function does the real work. It is called by main() in
pdfCropMargins.py, which just handles catching exceptions and cleaning up."""
##
## Process some of the command-line arguments.
##
if args.verbose:
print("\nProcessing the PDF with pdfCropMargins (version", __version__+")...")
print("System type:", ex.system_os)
if len(args.pdf_input_doc) > 1:
print("\nError in pdfCropMargins: Only one input PDF document is allowed."
"\nFound more than one on the command line:", file=sys.stderr)
for f in args.pdf_input_doc:
print(" ", f, file=sys.stderr)
ex.cleanup_and_exit(1)
input_doc_fname = ex.glob_if_windows_os(args.pdf_input_doc[0], exact_num_args=1)[0]
if not input_doc_fname.endswith((".pdf",".PDF")):
print("\nWarning in pdfCropMargins: The file extension is neither '.pdf'"
"\nnor '.PDF'; continuing anyway.\n", file=sys.stderr)
if args.verbose:
print("\nThe input document's filename is:\n ", input_doc_fname)
if not os.path.isfile(input_doc_fname):
print("\nError in pdfCropMargins: The specified input file\n "
+ input_doc_fname + "\nis not a file or does not exist.",
file=sys.stderr)
ex.cleanup_and_exit(1)
if not args.outfile:
if args.verbose: print("\nUsing the default-generated output filename.")
output_doc_fname = generate_default_filename(input_doc_fname)
else:
output_doc_fname = ex.glob_if_windows_os(args.outfile[0], exact_num_args=1)[0]
if args.verbose:
print("\nThe output document's filename will be:\n ", output_doc_fname)
if os.path.lexists(output_doc_fname) and args.noclobber:
print("\nOption '--noclobber' is set, refusing to overwrite an existing"
"\nfile with filename:\n ", output_doc_fname, file=sys.stderr)
ex.cleanup_and_exit(1)
if os.path.lexists(output_doc_fname) and ex.samefile(input_doc_fname,
output_doc_fname):
print("\nError in pdfCropMargins: The input file is the same as"
"\nthe output file.\n", file=sys.stderr)
ex.cleanup_and_exit(1)
if args.gsBbox and len(args.fullPageBox) > 1:
print("\nWarning: only one --fullPageBox value can be used with the -gs option.",
"\nIgnoring all but the first one.", file=sys.stderr)
args.fullPageBox = [args.fullPageBox[0]]
elif args.gsBbox and not args.fullPageBox: args.fullPageBox = ["c"] # gs default
elif not args.fullPageBox: args.fullPageBox = ["m", "c"] # usual default
if args.verbose:
print("\nFor the full page size, using values from the PDF box"
"\nspecified by the intersection of these boxes:", args.fullPageBox)
if args.absolutePreCrop: args.absolutePreCrop *= 4 # expand to 4 offsets
# See if all four offsets are explicitly set and use those if so.
if args.absolutePreCrop4: args.absolutePreCrop = args.absolutePreCrop4
if args.verbose:
print("\nThe absolute pre-crops to be applied to each margin, in units of bp,"
" are:\n ", args.absolutePreCrop)
if args.percentRetain: args.percentRetain *= 4 # expand to 4 percents
# See if all four percents are explicitly set and use those if so.
if args.percentRetain4: args.percentRetain = args.percentRetain4
if args.verbose:
print("\nThe percentages of margins to retain are:\n ",
args.percentRetain)
if args.absoluteOffset: args.absoluteOffset *= 4 # expand to 4 offsets
# See if all four offsets are explicitly set and use those if so.
if args.absoluteOffset4: args.absoluteOffset = args.absoluteOffset4
if args.verbose:
print("\nThe absolute offsets to be applied to each margin, in units of bp,"
" are:\n ", args.absoluteOffset)
# Parse the page ratio into a float if user chose that option.
if args.setPageRatios:
ratio = args.setPageRatios[0].split(":")
if len(ratio) > 2:
print("\nError in pdfCropMargins: Bad format in aspect ratio command line"
" argument.\nToo many colons.")
ex.cleanup_and_exit(1)
try:
if len(ratio) == 2:
args.setPageRatios[0] = float(ratio[0])/float(ratio[1])
else:
args.setPageRatios[0] = float(ratio[0])
except ValueError:
print("\nError in pdfCropMargins: Bad format in aspect ratio command line"
" argument.\nCannot convert to a float.")
ex.cleanup_and_exit(1)
# Set executable paths to non-default locations if set.
if args.pdftoppmPath: ex.set_pdftoppm_executable_to_string(args.pdftoppmPath)
if args.ghostscriptPath: ex.set_gs_executable_to_string(args.ghostscriptPath)
# If the option settings require pdftoppm, make sure we have a running
# version. If '--gsBbox' isn't chosen then assume that PDF pages are to be
# explicitly rendered. In that case we either need pdftoppm or gs to do the
# rendering.
gs_render_fallback_set = False # Set True if we switch to gs option as a fallback.
if not args.gsBbox and not args.gsRender:
found_pdftoppm = ex.init_and_test_pdftoppm_executable(
prefer_local=args.pdftoppmLocal)
if args.verbose:
print("\nFound pdftoppm program at:", found_pdftoppm)
if not found_pdftoppm:
args.gsRender = True
gs_render_fallback_set = True
if args.verbose:
print("\nNo pdftoppm executable found; using Ghostscript for rendering.")
# If any options require Ghostscript, make sure it it installed.
if args.gsBbox or args.gsFix or args.gsRender:
found_gs = ex.init_and_test_gs_executable()
if args.verbose:
print("\nFound Ghostscript program at:", found_gs)
if args.gsBbox and not found_gs:
print("\nError in pdfCropMargins: The '--gsBbox' option was specified but"
"\nthe Ghostscript executable could not be located. Is it"
"\ninstalled and in the PATH for command execution?\n", file=sys.stderr)
ex.cleanup_and_exit(1)
if args.gsFix and not found_gs:
print("\nError in pdfCropMargins: The '--gsFix' option was specified but"
"\nthe Ghostscript executable could not be located. Is it"
"\ninstalled and in the PATH for command execution?\n", file=sys.stderr)
ex.cleanup_and_exit(1)
if args.gsRender and not found_gs:
if gs_render_fallback_set:
print("\nError in pdfCropMargins: Neither Ghostscript nor pdftoppm"
"\nwas found in the PATH for command execution. At least one is"
"\nrequired.\n", file=sys.stderr)
else:
print("\nError in pdfCropMargins: The '--gsRender' option was specified but"
"\nthe Ghostscript executable could not be located. Is it"
"\ninstalled and in the PATH for command execution?\n", file=sys.stderr)
ex.cleanup_and_exit(1)
# Give a warning message if incompatible option combinations have been selected.
if args.gsBbox and args.threshold:
print("\nWarning in pdfCropMargins: The '--threshold' option is ignored"
"\nwhen the '--gsBbox' option is also selected.\n", file=sys.stderr)
if args.gsBbox and args.numBlurs:
print("\nWarning in pdfCropMargins: The '--numBlurs' option is ignored"
"\nwhen the '--gsBbox' option is also selected.\n", file=sys.stderr)
if args.gsBbox and args.numSmooths:
print("\nWarning in pdfCropMargins: The '--numSmooths' option is ignored"
"\nwhen the '--gsBbox' option is also selected.\n", file=sys.stderr)
##
## Open the input document in a PdfFileReader object. Due to an apparent bug
## in pyPdf we open two PdfFileReader objects for the file. The time required should
## still be small relative to finding the bounding boxes of pages. The bug is
## that writing a PdfFileWriter tends to hang on certain files if 1) pages from
## the same PdfFileReader are shared between two PdfFileWriter objects, or 2)
## the PdfFileWriter is written, the pages are modified, and there is an attempt
## to write the same PdfFileWriter to a different file.
##
if args.gsFix:
if args.verbose:
print("\nAttempting to fix the PDF input file before reading it...")
fixed_input_doc_fname = ex.fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname)
else:
fixed_input_doc_fname = input_doc_fname
# Open the input file object.
# TODO: Need try except since might fail for permissions.
fixed_input_doc_file_object = open(fixed_input_doc_fname, "rb")
try:
input_doc = PdfFileReader(fixed_input_doc_file_object)
tmp_input_doc = PdfFileReader(fixed_input_doc_file_object)
except (KeyboardInterrupt, EOFError):
raise
except: # Can raise various exceptions, just catch the rest here.
print("\nError in pdfCropMargins: The pyPdf module failed in an attempt"
"\nto read the input file. Is the file a PDF file? If so then it"
"\nmay be corrupted. If you have Ghostscript, try the '--gsFix'"
"\noption (assuming you are not using it already). That option can"
"\nalso convert some PostScript files to a readable format.",
file=sys.stderr)
ex.cleanup_and_exit(1)
##
## See if the document needs to be decrypted.
##
if args.password:
try:
input_doc.decrypt(args.password)
tmp_input_doc.decrypt(args.password)
except KeyError:
print("\nDecrypting with the password from the '--password' option"
"\nfailed.", file=sys.stderr)
ex.cleanup_and_exit(1)
else: # try decrypting with an empty password
try:
input_doc.decrypt("")
tmp_input_doc.decrypt("")
except KeyError:
pass # document apparently wasn't encrypted with an empty password
##
## Print out some data and metadata in verbose mode.
##
if args.verbose:
print("\nThe input document has %s pages." % input_doc.getNumPages())
try: # This is needed because the call sometimes just raises an error.
metadata_info = input_doc.getDocumentInfo()
except:
print("\nWarning: Document metadata could not be read.", file=sys.stderr)
metadata_info = None
if args.verbose and not metadata_info:
print("\nNo readable metadata in the document.")
elif args.verbose:
try:
print("\nThe document's metadata, if set:\n")
print(" The Author attribute set in the input document is:\n %s"
% (metadata_info.author))
print(" The Creator attribute set in the input document is:\n %s"
% (metadata_info.creator))
print(" The Producer attribute set in the input document is:\n %s"
% (metadata_info.producer))
print(" The Subject attribute set in the input document is:\n %s"
% (metadata_info.subject))
print(" The Title attribute set in the input document is:\n %s"
% (metadata_info.title))
# Some metadata cannot be decoded or encoded, at least on Windows. Could
# print from a function instead to write all the lines which can be written.
except (UnicodeDecodeError, UnicodeEncodeError):
print("\nWarning: Could not write all the document's metadata to the screen."
"\nGot a UnicodeEncodeError or a UnicodeDecodeError.")
##
## Now compute the set containing the pyPdf page number of all the pages
## which the user has selected for cropping from the command line. Most
## calculations are still carried-out for all the pages in the document.
## (There are a few optimizations for expensive operations like finding
## bounding boxes; the rest is negligible). This keeps the correspondence
## between page numbers and the positions of boxes in the box lists. The
## function apply_crop_list then just ignores the cropping information for any
## pages which were not selected.
##
all_page_nums = set(range(0, input_doc.getNumPages()))
page_nums_to_crop = set() # Note that this set holds page num MINUS ONE, start at 0.
if args.pages:
# Parse any page range specifier argument.
for page_num_or_range in args.pages.split(","):
split_range = page_num_or_range.split("-")
try:
if len(split_range) == 1:
# Note pyPdf page nums start at 0, not 1 like usual PDF pages,
# subtract 1.
page_nums_to_crop.add(int(split_range[0])-1)
else:
page_nums_to_crop.update(
set(range(int(split_range[0])-1, int(split_range[1]))))
except ValueError:
print(
"\nError in pdfCropMargins: The page range specified on the command",
"\nline contains a non-integer value or otherwise cannot be parsed.",
file=sys.stderr)
ex.cleanup_and_exit(1)
page_nums_to_crop = page_nums_to_crop & all_page_nums # intersect chosen with actual
else:
page_nums_to_crop = all_page_nums
# Print out the pages to crop in verbose mode.
if args.verbose and args.pages:
print("\nThese pages of the document will be cropped:", end="")
p_num_list = sorted(list(page_nums_to_crop))
num_pages_to_crop = len(p_num_list)
for i in range(num_pages_to_crop):
if i % 10 == 0 and i != num_pages_to_crop - 1:
print("\n ", end="")
print("%5d" % (p_num_list[i]+1), " ", end="")
print()
elif args.verbose:
print("\nAll the pages of the document will be cropped.")
##
## Get a list with the full-page boxes for each page: (left,bottom,right,top)
## This function also sets the MediaBox and CropBox of the pages to the
## chosen full-page size as a side-effect, saving the old boxes.
##
full_page_box_list, rotation_list = get_full_page_box_list_assigning_media_and_crop(
input_doc)
tmp_full_page_box_list, tmp_rotation_list = get_full_page_box_list_assigning_media_and_crop(
tmp_input_doc, quiet=True)
##
## Define a PdfFileWriter object and copy input_doc info over to it.
##
output_doc, tmp_output_doc, already_cropped_by_this_program = setup_output_document(
input_doc, tmp_input_doc, metadata_info)
##
## Write out the PDF document again, with the CropBox and MediaBox reset.
## This temp version is only used for calculating the bounding boxes of
## pages. Note we are writing from tmp_output_doc (due to an apparent bug
## discussed above). After this tmp_input_doc and tmp_output_doc are no longer
## needed.
##
if not args.restore:
doc_with_crop_and_media_boxes_name = ex.get_temporary_filename(".pdf")
doc_with_crop_and_media_boxes_object = open(
doc_with_crop_and_media_boxes_name, "wb")
if args.verbose:
print("\nWriting out the PDF with the CropBox and MediaBox redefined.")
try:
tmp_output_doc.write(doc_with_crop_and_media_boxes_object)
except (KeyboardInterrupt, EOFError):
raise
except: # PyPDF2 can raise various exceptions.
print("\nError in pdfCropMargins: The pyPdf program failed in trying to"
"\nwrite out a PDF file of the document. The document may be"
"\ncorrupted. If you have Ghostscript, try using the '--gsFix'"
"\noption (assuming you are not already using it).", file=sys.stderr)
ex.cleanup_and_exit(1)
doc_with_crop_and_media_boxes_object.close()
##
## Calculate the bounding_box_list containing tight page bounds for each page.
##
if not args.restore:
bounding_box_list = get_bounding_box_list(doc_with_crop_and_media_boxes_name,
input_doc, full_page_box_list, page_nums_to_crop, args, PdfFileWriter)
if args.verbose:
print("\nThe bounding boxes are:")
for pNum, b in enumerate(bounding_box_list):
print("\t", pNum+1, "\t", b)
##
## Calculate the crop_list based on the fullpage boxes and the bounding boxes.
##
if not args.restore:
crop_list = calculate_crop_list(full_page_box_list, bounding_box_list,
rotation_list, page_nums_to_crop)
else:
crop_list = None # Restore, not needed in this case.
##
## Apply the calculated crops to the pages of the PdfFileReader input_doc.
## These pages are copied to the PdfFileWriter output_doc.
##
apply_crop_list(crop_list, input_doc, page_nums_to_crop,
already_cropped_by_this_program)
##
## Write the final PDF out to a file.
##
if args.verbose: print("\nWriting the cropped PDF file.")
# TODO: Try and except on the open, since it might fail for permissions.
output_doc_stream = open(output_doc_fname, "wb")
try:
output_doc.write(output_doc_stream)
except (KeyboardInterrupt, EOFError):
raise
except: # PyPDF2 can raise various exceptions.
try:
# We know the write succeeded on tmp_output_doc or we wouldn't be here.
# Malformed document catalog info can cause write failures, so get
# a new output_doc without that data and try the write again.
print("\nWrite failure, trying one more time...", file=sys.stderr)
output_doc_stream.close()
output_doc_stream = open(output_doc_fname, "wb")
output_doc, tmp_output_doc, already_cropped = setup_output_document(
input_doc, tmp_input_doc, metadata_info, copy_document_catalog=False)
output_doc.write(output_doc_stream)
print("\nWarning: Document catalog data caused a write failure. A retry"
"\nwithout that data succeeded. No document catalog information was"
"\ncopied to the cropped output file. Try fixing the PDF file. If"
"\nyou have ghostscript installed, run pdfCropMargins with the '--gsFix'"
"\noption. You can also try blacklisting some of the document catalog"
"\nitems using the '--dcb' option.", file=sys.stderr)
except (KeyboardInterrupt, EOFError):
raise
except: # Give up... PyPDF2 can raise many errors for many reasons.
print("\nError in pdfCropMargins: The pyPdf program failed in trying to"
"\nwrite out a PDF file of the document. The document may be"
"\ncorrupted. If you have Ghostscript, try using the '--gsFix'"
"\noption (assuming you are not already using it).", file=sys.stderr)
ex.cleanup_and_exit(1)
output_doc_stream.close()
# We're finished with this open file; close it and let temp dir removal delete it.
fixed_input_doc_file_object.close()
##
## Now handle the options which apply after the file is written.
##
def do_preview(output_doc_fname):
viewer = args.preview
if args.verbose:
print("\nPreviewing the output document with viewer:\n ", viewer)
ex.show_preview(viewer, output_doc_fname)
return
# Handle the '--queryModifyOriginal' option.
if args.queryModifyOriginal:
if args.preview:
print("\nRunning the preview viewer on the file, will query whether or not"
"\nto modify the original file after the viewer is launched in the"
"\nbackground...\n")
do_preview(output_doc_fname)
# Give preview time to start; it may write startup garbage to the terminal...
query_wait_time = 2 # seconds
time.sleep(query_wait_time)
print()
while True:
query_string = "\nModify the original file to the cropped file " \
"(saving the original)? [yn] "
if ex.python_version[0] == "2":
query_result = raw_input(query_string).decode("utf-8")
else:
query_result = input(query_string)
if query_result in ["y", "Y"]:
args.modifyOriginal = True
print("\nModifying the original file.")
break
elif query_result in ["n", "N"]:
print("\nNot modifying the original file. The cropped file is saved"
" as:\n {0}".format(output_doc_fname))
args.modifyOriginal = False
break
else:
print("Response must be in the set {y,Y,n,N}, none recognized.")
continue
# Handle the '--modifyOriginal' option.
if args.modifyOriginal:
generated_uncropped_filename = generate_default_filename(
input_doc_fname, is_cropped_file=False)
# Remove any existing file with the name generated_uncropped_filename unless a
# relevant noclobber option is set or it isn't a file.
if os.path.exists(generated_uncropped_filename):
if (os.path.isfile(generated_uncropped_filename)
and not args.noclobberOriginal and not args.noclobber):
if args.verbose:
print("\nRemoving the file\n ", generated_uncropped_filename)
try:
os.remove(generated_uncropped_filename)
except OSError:
print("Removing the file {} failed. Maybe a permission error?"
"\nFiles are as if option '--modifyOriginal' were not set."
.format(generated_uncropped_filename))
args.modifyOriginal = False # Failed.
else:
print("\nA noclobber option is set or else not a file; refusing to"
" overwrite:\n ", generated_uncropped_filename,
"\nFiles are as if option '--modifyOriginal' were not set.",
file=sys.stderr)
args.modifyOriginal = False # Failed.
# Move the original file to the name for uncropped files. Silently do nothing
# if the file exists (should have been removed above).
if not os.path.exists(generated_uncropped_filename):
if args.verbose: print("\nDoing a file move:\n ", input_doc_fname,
"\nis moving to:\n ", generated_uncropped_filename)
shutil.move(input_doc_fname, generated_uncropped_filename)
# Move the cropped file to the original file's name. Silently do nothing if
# the file exists (should have been moved above).
if not os.path.exists(input_doc_fname):
if args.verbose: print("\nDoing a file move:\n ", output_doc_fname,
"\nis moving to:\n ", input_doc_fname)
shutil.move(output_doc_fname, input_doc_fname)
# Handle any previewing which still needs to be done.
if args.preview and not args.queryModifyOriginal: # already previewed in query mod
if args.modifyOriginal: # already swapped to original filename in this case
do_preview(input_doc_fname)
else: # the usual case, preview the output filename
do_preview(output_doc_fname)
if args.verbose: print("\nFinished this run of pdfCropMargins.\n") | 0.008001 |
def list_prefix(self):
""" List prefixes and return JSON encoded result.
"""
# fetch attributes from request.json
attr = XhrController.extract_prefix_attr(request.json)
try:
prefixes = Prefix.list(attr)
except NipapError as e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(prefixes, cls=NipapJSONEncoder) | 0.006912 |
def _cachedSqlType(cls):
"""
Cache the sqlType() on the class, because it's heavily used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type | 0.011407 |
def order_derived_variables(regime):
"""
Finds ordering of derived_variables.
@param regime: Dynamics Regime containing derived variables.
@type regime: lems.model.dynamics.regime
@return: Returns ordered list of derived variables.
@rtype: list(string)
@raise SimBuildError: Raised when a proper ordering of derived
variables could not be found.
"""
ordering = []
dvs = []
dvsnoexp = []
maxcount = 5
for dv in regime.derived_variables:
if dv.expression_tree is None:
dvsnoexp.append(dv.name)
else:
dvs.append(dv.name)
for dv in regime.conditional_derived_variables:
if len(dv.cases) == 0:
dvsnoexp.append(dv.name)
else:
dvs.append(dv.name)
count = maxcount
while count > 0 and dvs != []:
count = count - 1
for dv1 in dvs:
if dv1 in regime.derived_variables:
dv = regime.derived_variables[dv1]
else:
dv = regime.conditional_derived_variables[dv1]
found = False
if isinstance(dv, DerivedVariable):
exp_tree = dv.expression_tree
for dv2 in dvs:
if dv1 != dv2 and is_var_in_exp_tree(dv2, exp_tree):
found = True
else:
for case in dv.cases:
for dv2 in dvs:
if dv1 != dv2 and (is_var_in_exp_tree(dv2, case.condition_expression_tree) or
is_var_in_exp_tree(dv2, case.value_expression_tree)):
found = True
if not found:
ordering.append(dv1)
del dvs[dvs.index(dv1)]
count = maxcount
break
if count == 0:
raise SimBuildError(("Unable to find ordering for derived "
"variables in regime '{0}'").format(regime.name))
#return ordering + dvsnoexp
return dvsnoexp + ordering | 0.004699 |
def linear_elasticity(grid, spacing=None, E=1e5, nu=0.3, format=None):
"""Linear elasticity problem discretizes with Q1 finite elements on a regular rectangular grid.
Parameters
----------
grid : tuple
length 2 tuple of grid sizes, e.g. (10, 10)
spacing : tuple
length 2 tuple of grid spacings, e.g. (1.0, 0.1)
E : float
Young's modulus
nu : float
Poisson's ratio
format : string
Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.)
Returns
-------
A : csr_matrix
FE Q1 stiffness matrix
B : array
rigid body modes
See Also
--------
linear_elasticity_p1
Notes
-----
- only 2d for now
Examples
--------
>>> from pyamg.gallery import linear_elasticity
>>> A, B = linear_elasticity((4, 4))
References
----------
.. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. Klose,
"Matlab implementation of the finite element method in elasticity"
Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263
http://www.math.hu-berlin.de/~cc/
"""
if len(grid) == 2:
return q12d(grid, spacing=spacing, E=E, nu=nu, format=format)
else:
raise NotImplementedError('no support for grid=%s' % str(grid))
async def request(self, api_commands):
"""Make a request."""
if not isinstance(api_commands, list):
result = await self._execute(api_commands)
return result
commands = (self._execute(api_command) for api_command in api_commands)
command_results = await asyncio.gather(*commands, loop=self._loop)
return command_results | 0.005208 |
def _parse_message(self, data):
"""Interpret each message datagram from device and do the needful.
This function receives datagrams from _assemble_buffer and inerprets
what they mean. It's responsible for maintaining the internal state
table for each device attribute and also for firing the update_callback
function (if one was supplied)
"""
recognized = False
newdata = False
if data.startswith('!I'):
self.log.warning('Invalid command: %s', data[2:])
recognized = True
elif data.startswith('!R'):
self.log.warning('Out-of-range command: %s', data[2:])
recognized = True
elif data.startswith('!E'):
self.log.warning('Cannot execute recognized command: %s', data[2:])
recognized = True
elif data.startswith('!Z'):
self.log.warning('Ignoring command for powered-off zone: %s', data[2:])
recognized = True
else:
for key in LOOKUP:
if data.startswith(key):
recognized = True
value = data[len(key):]
oldvalue = getattr(self, '_'+key)
if oldvalue != value:
changeindicator = 'New Value'
newdata = True
else:
changeindicator = 'Unchanged'
if key in LOOKUP:
if 'description' in LOOKUP[key]:
if value in LOOKUP[key]:
self.log.info('%s: %s (%s) -> %s (%s)',
changeindicator,
LOOKUP[key]['description'], key,
LOOKUP[key][value], value)
else:
self.log.info('%s: %s (%s) -> %s',
changeindicator,
LOOKUP[key]['description'], key,
value)
else:
self.log.info('%s: %s -> %s', changeindicator, key, value)
setattr(self, '_'+key, value)
if key == 'Z1POW' and value == '1' and oldvalue == '0':
self.log.info('Power on detected, refreshing all attributes')
self._poweron_refresh_successful = False
self._loop.call_later(1, self.poweron_refresh)
if key == 'Z1POW' and value == '0' and oldvalue == '1':
self._poweron_refresh_successful = False
break
if data.startswith('ICN'):
self.log.warning('ICN update received')
recognized = True
self._populate_inputs(int(value))
if data.startswith('ISN'):
recognized = True
self._poweron_refresh_successful = True
input_number = int(data[3:5])
value = data[5:]
oldname = self._input_names.get(input_number, '')
if oldname != value:
self._input_numbers[value] = input_number
self._input_names[input_number] = value
self.log.info('New Value: Input %d is called %s', input_number, value)
newdata = True
if newdata:
if self._update_callback:
self._loop.call_soon(self._update_callback, data)
else:
self.log.debug('no new data encountered')
if not recognized:
self.log.warning('Unrecognized response: %s', data) | 0.001593 |
def __find_another_nearest_medoid(self, point_index, current_medoid_index):
"""!
@brief Finds another nearest medoid for the specified point that differs from the specified medoid.
@param[in] point_index: index of the point in the data space for which the search over the current list of medoids is performed.
@param[in] current_medoid_index: index of the medoid that shouldn't be considered as the nearest.
@return (uint) index of another nearest medoid for the point.
"""
other_medoid_index = -1
other_distance_nearest = float('inf')
for index_medoid in self.__current:
if (index_medoid != current_medoid_index):
other_distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[index_medoid])
if other_distance_candidate < other_distance_nearest:
other_distance_nearest = other_distance_candidate
other_medoid_index = index_medoid
return other_medoid_index | 0.010508 |
def makeLambdaPicklable(lambda_function):
"""Take input lambda function l and makes it picklable."""
if isinstance(lambda_function,
type(lambda: None)) and lambda_function.__name__ == '<lambda>':
def __reduce_ex__(proto):
# TODO: argdefs, closure
return unpickleLambda, (marshal.dumps(lambda_function.__code__), )
lambda_function.__reduce_ex__ = __reduce_ex__
return lambda_function | 0.004425 |
def show_raslog_output_cmd_status_error_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_raslog = ET.Element("show_raslog")
config = show_raslog
output = ET.SubElement(show_raslog, "output")
cmd_status_error_msg = ET.SubElement(output, "cmd-status-error-msg")
cmd_status_error_msg.text = kwargs.pop('cmd_status_error_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003914 |
def remove_game_containers(name_filter: str) -> None:
"""
:raises docker.exceptions.APIError
"""
for container in docker_client.containers.list(filters={"name": name_filter}, all=True):
container.stop()
container.remove() | 0.007905 |
def match(self, p_todo):
"""
Performs a match on a priority in the todo.
It gets priority from p_todo and compares it with user-entered
expression based on the given operator (default ==). It does that however
in reversed order to obtain more intuitive result. Example: (>B) will
match todos with priority (A).
Items without priority are designated with corresponding operand set to
'ZZ', because python doesn't allow NoneType() and str() comparisons.
"""
operand1 = self.value
operand2 = p_todo.priority() or 'ZZ'
return self.compare_operands(operand1, operand2) | 0.004539 |
def _warnCount(self, warnings, warningCount=None):
"""
Calculate the count of each warning, given a list of them.
@param warnings: L{list} of L{dict}s that come from
L{tools.parsePyLintWarnings}.
@param warningCount: A L{dict} produced by this method previously, if
you are adding to the warnings.
@return: L{dict} of L{dict}s for the warnings.
"""
if not warningCount:
warningCount = {}
for warning in warnings:
wID = warning["warning_id"]
if not warningCount.get(wID):
warningCount[wID] = {}
warningCount[wID]["count"] = 1
warningCount[wID]["message"] = warning.get("warning_message")
else:
warningCount[wID]["count"] += 1
return warningCount | 0.00232 |
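An illustrative input and result shape, assuming the warnings have already been parsed into the dict form this method expects (IDs and messages are made up).
warnings = [
    {"warning_id": "W0611", "warning_message": "Unused import os"},
    {"warning_id": "W0611", "warning_message": "Unused import sys"},
    {"warning_id": "C0103", "warning_message": "Invalid name"},
]
# self._warnCount(warnings) would return:
# {"W0611": {"count": 2, "message": "Unused import os"},
#  "C0103": {"count": 1, "message": "Invalid name"}}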
def nsDefs(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNsDefs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp | 0.020101 |
def download_as_file(self, url: str, folder: Path, name: str, delay: float = 0) -> str:
"""
Download the given url to the given target folder.
:param url: link
:type url: str
:param folder: target folder
:type folder: ~pathlib.Path
:param name: target file name
:type name: str
:param delay: after download wait in seconds
:type delay: float
:return: url
:rtype: str
:raises ~urllib3.exceptions.HTTPError: if the connection has an error
"""
while folder.joinpath(name).exists(): # TODO: handle already existing files
self.log.warning('already exists: ' + name)
name = name + '_d'
with self._downloader.request('GET', url, preload_content=False, retries=urllib3.util.retry.Retry(3)) as reader:
if reader.status == 200:
with folder.joinpath(name).open(mode='wb') as out_file:
out_file.write(reader.data)
else:
raise HTTPError(f"{url} | {reader.status}")
if delay > 0:
time.sleep(delay)
return url | 0.004329 |
def use_dill( self ):
"""Make the cluster use Dill as pickler for transferring results. This isn't
generally needed, but is sometimes useful for particularly complex experiments
such as those involving closures. (Or, to put it another way, if you find yourself
tempted to use this method, consider re-structuring your experiment code.)"""
self.open()
with self.sync_imports(quiet = True):
import dill
self._client.direct_view().use_dill() | 0.019763 |
def setup_and_run_analysis(self):
"""Execute analysis after the tab is displayed.
Please check the code in dock.py accept(). It should follow
approximately the same code.
"""
self.show_busy()
# Read user's settings
self.read_settings()
# Prepare impact function from wizard dialog user input
self.impact_function = self.prepare_impact_function()
# Prepare impact function
status, message = self.impact_function.prepare()
message = basestring_to_message(message)
# Check status
if status == PREPARE_FAILED_BAD_INPUT:
self.hide_busy()
LOGGER.warning(tr(
'The impact function will not be able to run because of the '
'inputs.'))
LOGGER.warning(message.to_text())
send_error_message(self, message)
return status, message
if status == PREPARE_FAILED_BAD_CODE:
self.hide_busy()
LOGGER.warning(tr(
'The impact function was not able to be prepared because of a '
'bug.'))
LOGGER.exception(message.to_text())
send_error_message(self, message)
return status, message
# Start the analysis
status, message = self.impact_function.run()
message = basestring_to_message(message)
# Check status
if status == ANALYSIS_FAILED_BAD_INPUT:
self.hide_busy()
LOGGER.warning(tr(
'The impact function could not run because of the inputs.'))
LOGGER.warning(message.to_text())
send_error_message(self, message)
return status, message
elif status == ANALYSIS_FAILED_BAD_CODE:
self.hide_busy()
LOGGER.warning(tr(
'The impact function could not run because of a bug.'))
LOGGER.exception(message.to_text())
send_error_message(self, message)
return status, message
LOGGER.info(tr('The impact function could run without errors.'))
# Add result layer to QGIS
add_impact_layers_to_canvas(
self.impact_function, iface=self.parent.iface)
# Some if-s i.e. zoom, debug, hide exposure
if self.zoom_to_impact_flag:
self.iface.zoomToActiveLayer()
qgis_exposure = (
QgsProject.instance().mapLayer(
self.parent.exposure_layer.id()))
if self.hide_exposure_flag and qgis_exposure is not None:
treeroot = QgsProject.instance().layerTreeRoot()
treelayer = treeroot.findLayer(qgis_exposure.id())
if treelayer:
treelayer.setItemVisibilityChecked(False)
# we only want to generate non pdf/qpt report
html_components = [standard_impact_report_metadata_html]
error_code, message = self.impact_function.generate_report(
html_components)
message = basestring_to_message(message)
if error_code == ImpactReport.REPORT_GENERATION_FAILED:
self.hide_busy()
LOGGER.info(tr(
'The impact report could not be generated.'))
send_error_message(self, message)
LOGGER.exception(message.to_text())
return ANALYSIS_FAILED_BAD_CODE, message
self.extent.set_last_analysis_extent(
self.impact_function.analysis_extent,
qgis_exposure.crs())
# Hide busy
self.hide_busy()
# Setup gui if analysis is done
self.setup_gui_analysis_done()
return ANALYSIS_SUCCESS, None | 0.000546 |
def next_close(self, dt):
"""
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
"""
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC) | 0.004484 |
def is_punctuation(text):
"""Check if given string is a punctuation"""
return not (text.lower() in config.AVRO_VOWELS or
text.lower() in config.AVRO_CONSONANTS) | 0.005435 |
def assure_migrations_table_setup(db):
"""
Make sure the migrations table is set up in the database.
"""
from mig.models import MigrationData
if not MigrationData.__table__.exists(db.bind):
MigrationData.metadata.create_all(
db.bind, tables=[MigrationData.__table__]) | 0.003247 |
def gep(self, indices):
"""
Call getelementptr on this pointer constant.
"""
if not isinstance(self.type, types.PointerType):
raise TypeError("can only call gep() on pointer constants, not '%s'"
% (self.type,))
outtype = self.type
for i in indices:
outtype = outtype.gep(i)
strindices = ["{0} {1}".format(idx.type, idx.get_reference())
for idx in indices]
op = "getelementptr ({0}, {1} {2}, {3})".format(
self.type.pointee, self.type,
self.get_reference(), ', '.join(strindices))
return FormattedConstant(outtype.as_pointer(self.addrspace), op) | 0.004184 |
def get_assessment_taken_query_session_for_bank(self, bank_id):
"""Gets the ``OsidSession`` associated with the assessment taken query service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the bank
return: (osid.assessment.AssessmentTakenQuerySession) - an
``AssessmentTakenQuerySession``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_assessment_taken_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_taken_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_assessment_taken_query():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.AssessmentTakenQuerySession(bank_id, runtime=self._runtime) | 0.004263 |
def reencrypt_row_content(db,
table,
row_id,
decrypt_func,
encrypt_func,
logger):
"""
Re-encrypt a row from ``table`` with ``id`` of ``row_id``.
"""
q = (select([table.c.content])
.with_for_update()
.where(table.c.id == row_id))
[(content,)] = db.execute(q)
logger.info("Begin encrypting %s row %s.", table.name, row_id)
db.execute(
table
.update()
.where(table.c.id == row_id)
.values(content=encrypt_func(decrypt_func(content)))
)
logger.info("Done encrypting %s row %s.", table.name, row_id) | 0.001404 |
def msg_nocr(self, msg, opts={}):
""" Convenience short-hand for self.debugger.intf[-1].msg_nocr """
try:
return(self.debugger.intf[-1].msg_nocr(msg))
except EOFError:
# FIXME: what do we do here?
pass
return None | 0.007117 |
def astext(data):
"""
Given a unicode/str/bytes always return str.
We prefer to work with the 'native' string type for the version of python
we run on, and this gets us that.
"""
if isinstance(data, str):
return data
elif isinstance(data, text_type):
return data.encode("utf-8", "ignore")
elif isinstance(data, binary_type):
return data.decode("utf-8", "ignore")
raise TypeError('{!r} not a string'.format(data)) | 0.002114 |
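A brief usage sketch for astext, assuming text_type and binary_type are the usual six-style aliases (str and bytes on Python 3):

print(astext("hello"))     # 'hello' -- already the native str type
print(astext(b"hello"))    # 'hello' on Python 3 -- bytes are decoded as UTF-8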
def builtin_lookup(name):
"""lookup a name into the builtin module
return the list of matching statements and the astroid for the builtin
module
"""
builtin_astroid = MANAGER.ast_from_module(builtins)
if name == "__dict__":
return builtin_astroid, ()
try:
stmts = builtin_astroid.locals[name]
except KeyError:
stmts = ()
return builtin_astroid, stmts | 0.002439 |
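A minimal usage sketch, assuming astroid's MANAGER and the builtins module are imported as in the snippet above:

mod, stmts = builtin_lookup("len")    # astroid module for builtins + matching statements
print(mod.name, len(stmts) > 0)       # e.g. "builtins True"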
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png'] | 0.005256 |
def get_or_create(cls, **kwargs):
"""
Implements get_or_create logic for models that inherit from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
"""
try:
obj = cls.objects.get(**kwargs)
created = False
except cls.DoesNotExist:
obj = cls(**kwargs)
created = True
calculate_hash(obj)
obj.save()
return (obj, created) | 0.002188 |
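A hedged usage sketch; MyHashableModel and its fields are hypothetical stand-ins for a concrete subclass of representatives.models.HashableModel:

obj, created = MyHashableModel.get_or_create(name="ACME", country="FR")
print(created)    # True on first call (row inserted and hashed), False afterwards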
def del_record(cls, fqdn, name, type):
"""Delete record for a domain."""
meta = cls.get_fqdn_info(fqdn)
url = meta['domain_records_href']
delete_url = url
if name:
delete_url = '%s/%s' % (delete_url, name)
if type:
delete_url = '%s/%s' % (delete_url, type)
return cls.json_delete(delete_url) | 0.005391 |
def cutadaptit_single(data, sample):
"""
Applies quality and adapter filters to reads using cutadapt. If the ipyrad
filter param is set to 0 then it only filters to hard trim edges and uses
mintrimlen. If filter=1, we add quality filters. If filter=2 we add
adapter filters.
"""
sname = sample.name
## if (GBS, ddRAD) we look for the second cut site + adapter. For single-end
## data we don't bother trying to remove the second barcode since it's not
## as critical as with PE data.
if data.paramsdict["datatype"] == "rad":
adapter = data._hackersonly["p3_adapter"]
else:
## if GBS then the barcode can also be on the other side.
if data.paramsdict["datatype"] == "gbs":
## make full adapter (-revcompcut-revcompbarcode-adapter)
## and add adapter without revcompbarcode
if data.barcodes:
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ fullcomp(data.barcodes[sample.name])[::-1] \
+ data._hackersonly["p3_adapter"]
## add incomplete adapter to extras (-recompcut-adapter)
data._hackersonly["p3_adapters_extra"].append(
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"])
else:
LOGGER.warning("No barcode information present, and is therefore not "+\
"being used for adapter trimming of SE gbs data.")
## else no search for barcodes on 3'
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
else:
adapter = \
fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
+ data._hackersonly["p3_adapter"]
## get length trim parameter from new or older version of ipyrad params
trim5r1 = trim3r1 = []
if data.paramsdict.get("trim_reads"):
trimlen = data.paramsdict.get("trim_reads")
## trim 5' end
if trimlen[0]:
trim5r1 = ["-u", str(trimlen[0])]
if trimlen[1] < 0:
trim3r1 = ["-u", str(trimlen[1])]
if trimlen[1] > 0:
trim3r1 = ["--length", str(trimlen[1])]
else:
trimlen = data.paramsdict.get("edit_cutsites")
trim5r1 = ["--cut", str(trimlen[0])]
## testing new 'trim_reads' setting
cmdf1 = ["cutadapt"]
if trim5r1:
cmdf1 += trim5r1
if trim3r1:
cmdf1 += trim3r1
cmdf1 += ["--minimum-length", str(data.paramsdict["filter_min_trim_len"]),
"--max-n", str(data.paramsdict["max_low_qual_bases"]),
"--trim-n",
"--output", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"),
sample.files.concat[0][0]]
if int(data.paramsdict["filter_adapters"]):
## NEW: only quality trim the 3' end for SE data.
cmdf1.insert(1, "20")
cmdf1.insert(1, "-q")
cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"]))
cmdf1.insert(1, "--quality-base")
## if filter_adapters==3 then p3_adapters_extra will already have extra
## poly adapters added to its list.
if int(data.paramsdict["filter_adapters"]) > 1:
## first enter extra cuts (order of input is reversed)
for extracut in list(set(data._hackersonly["p3_adapters_extra"]))[::-1]:
cmdf1.insert(1, extracut)
cmdf1.insert(1, "-a")
## then put the main cut so it appears first in command
cmdf1.insert(1, adapter)
cmdf1.insert(1, "-a")
## do modifications to read1 and write to tmp file
LOGGER.info(cmdf1)
proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
try:
res1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
raise KeyboardInterrupt
## raise errors if found
if proc1.returncode:
raise IPyradWarningExit(" error in {}\n {}".format(" ".join(cmdf1), res1))
## return result string to be parsed outside of engine
return res1 | 0.010087 |
def si_prefix(value):
""" By Forrest Green (2010)"""
#standard si prefixes
prefixes = ['y','z','a','f','p','n','u','m','','k','M','G','T','P','E','Z','Y']
from math import log
#closest 1000 exponent
if value == 0: return (value, "")
exp = int(log(value,1000)//1) + 8
if exp < 0: exp = 0
if exp > 16: exp = 16
return (value*1000**(-(exp-8)), prefixes[exp]) | 0.060606 |
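A quick usage sketch for si_prefix, showing how the returned (scaled value, prefix) pair can be combined into a human-readable label:

value, prefix = si_prefix(4700.0)          # -> (4.7, 'k')
print("{:.3g} {}F".format(value, prefix))  # prints "4.7 kF"
value, prefix = si_prefix(2.5e-6)          # -> (2.5, 'u')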
def synonyms(self):
"""Return a dict of term synonyms"""
syns = {}
for k, v in self._declared_terms.items():
k = k.strip()
if v.get('synonym'):
syns[k.lower()] = v['synonym']
                if '.' not in k:
syns[ROOT_TERM + '.' + k.lower()] = v['synonym']
return syns | 0.008264 |
def load_balancer_delete(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Delete a load balancer.
:param name: The name of the load balancer to delete.
:param resource_group: The resource group name assigned to the
load balancer.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.load_balancer_delete testlb testgroup
'''
result = False
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
load_balancer = netconn.load_balancers.delete(
load_balancer_name=name,
resource_group_name=resource_group
)
load_balancer.wait()
result = True
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
return result | 0.001214 |
def requires_authentication(fn):
"""
Requires that the calling Subject be authenticated before allowing access.
"""
@functools.wraps(fn)
def wrap(*args, **kwargs):
subject = WebYosai.get_current_subject()
if not subject.authenticated:
msg = "The current Subject is not authenticated. ACCESS DENIED."
raise WebYosai.get_current_webregistry().raise_unauthorized(msg)
return fn(*args, **kwargs)
return wrap | 0.00956 |
def accept_transfer(transfer, comment=None):
'''Accept an incoming a transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'accepted'
transfer.response_comment = comment
transfer.save()
subject = transfer.subject
recipient = transfer.recipient
if isinstance(recipient, Organization):
subject.organization = recipient
elif isinstance(recipient, User):
subject.owner = recipient
subject.save()
return transfer | 0.001669 |
def is_authenticated(user):
"""Return whether or not a User is authenticated.
Function provides compatibility following deprecation of method call to
`is_authenticated()` in Django 2.0.
This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier),
    as `is_authenticated` was introduced as a property in v1.10.
"""
if not hasattr(user, 'is_authenticated'):
return False
if callable(user.is_authenticated):
# Will be callable if django.version < 2.0, but is only necessary in
# v1.9 and earlier due to change introduced in v1.10 making
# `is_authenticated` a property instead of a callable.
return user.is_authenticated()
else:
return user.is_authenticated | 0.00266 |
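A self-contained sketch with stand-in user objects (not Django's real User model) to illustrate both branches of is_authenticated:

class PropertyStyleUser:                      # mimics Django >= 1.10
    is_authenticated = True

class NoAuthAttribute:                        # falls through the hasattr() guard
    pass

print(is_authenticated(PropertyStyleUser()))  # True
print(is_authenticated(NoAuthAttribute()))    # False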
def delete(self, user):
"""Delete a resource"""
if user:
can_delete = yield self.can_delete(user)
else:
can_delete = False
if not can_delete:
raise exceptions.Unauthorized('User may not delete the resource')
doc = {
'_id': self.id,
'_deleted': True
}
try:
doc['_rev'] = self._rev
except AttributeError:
pass
db = self.db_client()
yield db.save_doc(doc)
self._resource = doc | 0.003636 |
def sudo_remove_dirtree(dir_name):
"""Removes directory tree as a superuser.
Args:
dir_name: name of the directory to remove.
  This function is necessary to clean up directories created from inside a
  Docker container; since they are usually written as root, they have to be
  removed as root.
"""
try:
subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
except subprocess.CalledProcessError as e:
    raise WorkerError("Can't remove directory {0}".format(dir_name), e) | 0.00823
def _assemble_gef(stmt):
"""Assemble Gef statements into text."""
subj_str = _assemble_agent_str(stmt.gef)
obj_str = _assemble_agent_str(stmt.ras)
stmt_str = subj_str + ' is a GEF for ' + obj_str
return _make_sentence(stmt_str) | 0.004049 |
def authenticate_swift_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with swift api."""
self.log.debug('Authenticating swift user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
interface='publicURL')
if keystone.session:
return swiftclient.Connection(session=keystone.session)
else:
return swiftclient.Connection(authurl=ep,
user=user,
key=password,
tenant_name=tenant,
auth_version='2.0') | 0.00271 |
def model_fields(model, allow_pk=False, only=None, exclude=None,
field_args=None, converter=None):
"""
Generate a dictionary of fields for a given Peewee model.
See `model_form` docstring for description of parameters.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
model_fields = list(model._meta.sorted_fields)
if not allow_pk:
model_fields.pop(0)
if only:
model_fields = [x for x in model_fields if x.name in only]
elif exclude:
model_fields = [x for x in model_fields if x.name not in exclude]
field_dict = {}
for model_field in model_fields:
name, field = converter.convert(
model,
model_field,
field_args.get(model_field.name))
field_dict[name] = field
return field_dict | 0.00117 |
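A hedged usage sketch with a small, hypothetical Peewee model (model and field names are illustrative; the module-level ModelConverter from wtf-peewee is assumed, as in the snippet):

import peewee

class Book(peewee.Model):
    title = peewee.CharField()
    pages = peewee.IntegerField()

fields = model_fields(Book, only=['title'])   # mapping of field name -> form field
print(list(fields))                           # ['title']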
def create_port_binding(self, port, host):
"""Enqueue port binding create"""
if not self.get_instance_type(port):
return
for pb_key in self._get_binding_keys(port, host):
pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE,
a_const.CREATE)
self.provision_queue.put(pb_res) | 0.005348 |
def encrypt_text(self, text, *args, **kwargs):
"""
Encrypt a string.
input: unicode str, output: unicode str
"""
b = text.encode("utf-8")
token = self.encrypt(b, *args, **kwargs)
return base64.b64encode(token).decode("utf-8") | 0.007092 |
def _capture(f, t, t0, factor):
'''
    Capture the signal: split it into blocks of length t0*factor, take the
    mean of each block, and return the standard deviation of those means.
'''
n_per_sec = len(t) / t[-1]
# len of one split:
n = int(t0 * factor * n_per_sec)
s = len(f) // n
m = s * n
f = f[:m]
ff = np.split(f, s)
m = np.mean(ff, axis=1)
return np.std(m) | 0.002833 |
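A small sketch exercising _capture on a synthetic signal; numpy is assumed to be imported as np, as in the snippet:

import numpy as np
t = np.linspace(0.01, 10.0, 1000)              # ~10 s of samples
f = np.sin(2 * np.pi * t) + 0.05 * np.random.randn(t.size)
print(_capture(f, t, t0=1.0, factor=1.0))      # std of the per-block means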
def remove_mock(self, mock):
"""
Removes a specific mock instance by object reference.
Arguments:
mock (pook.Mock): mock instance to remove.
"""
self.mocks = [m for m in self.mocks if m is not mock] | 0.007968 |
def get_endpoint(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Return the endpoint of an RDS instance.
CLI example::
salt myminion boto_rds.get_endpoint myrds
'''
endpoint = False
res = __salt__['boto_rds.exists'](name, tags, region, key, keyid,
profile)
if res.get('exists'):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn:
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
if rds and 'Endpoint' in rds['DBInstances'][0]:
endpoint = rds['DBInstances'][0]['Endpoint']['Address']
return endpoint
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return endpoint | 0.002268 |
def create_mapping(self, mapped_class, configuration=None):
"""
Creates a new mapping for the given mapped class and representer
configuration.
:param configuration: configuration for the new data element class.
:type configuration: :class:`RepresenterConfiguration`
:returns: newly created instance of :class:`Mapping`
"""
cfg = self.__configuration.copy()
        if configuration is not None:
cfg.update(configuration)
provided_ifcs = provided_by(object.__new__(mapped_class))
if IMemberResource in provided_ifcs:
base_data_element_class = self.member_data_element_base_class
elif ICollectionResource in provided_ifcs:
base_data_element_class = self.collection_data_element_base_class
elif IResourceLink in provided_ifcs:
base_data_element_class = self.linked_data_element_base_class
else:
raise ValueError('Mapped class for data element class does not '
'implement one of the required interfaces.')
name = "%s%s" % (mapped_class.__name__,
base_data_element_class.__name__)
de_cls = type(name, (base_data_element_class,), {})
mp = self.mapping_class(self, mapped_class, de_cls, cfg)
# Set the data element class' mapping.
# FIXME: This looks like a hack.
de_cls.mapping = mp
return mp | 0.002052 |
def diffuser_curved(Di1, Di2, l):
r'''Returns loss coefficient for any curved wall pipe expansion
as shown in [1]_.
.. math::
K_1 = \phi(1.43-1.3\beta^2)(1-\beta^2)^2
.. math::
\phi = 1.01 - 0.624\frac{l}{d_1} + 0.30\left(\frac{l}{d_1}\right)^2
- 0.074\left(\frac{l}{d_1}\right)^3 + 0.0070\left(\frac{l}{d_1}\right)^4
.. figure:: fittings/curved_wall_diffuser.png
:scale: 25 %
:alt: diffuser curved; after [1]_
Parameters
----------
Di1 : float
Inside diameter of original pipe (smaller), [m]
Di2 : float
Inside diameter of following pipe (larger), [m]
l : float
Length of the curve along the pipe axis, [m]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
Beta^2 should be between 0.1 and 0.9.
A small mismatch between tabulated values of this function in table 11.3
is observed with the equation presented.
Examples
--------
>>> diffuser_curved(Di1=.25**0.5, Di2=1., l=2.)
0.2299781250000002
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
'''
beta = Di1/Di2
phi = 1.01 - 0.624*l/Di1 + 0.30*(l/Di1)**2 - 0.074*(l/Di1)**3 + 0.0070*(l/Di1)**4
return phi*(1.43 - 1.3*beta**2)*(1 - beta**2)**2 | 0.002831 |
def cnxml_to_html(cnxml_source):
"""Transform the CNXML source to HTML"""
source = _string2io(cnxml_source)
xml = etree.parse(source)
# Run the CNXML to HTML transform
xml = _transform('cnxml-to-html5.xsl', xml,
version='"{}"'.format(version))
xml = XHTML_MODULE_BODY_XPATH(xml)
return etree.tostring(xml[0]) | 0.002801 |
def extract_files(file_paths, target_path):
""" Unpack all files to the given path. """
os.makedirs(target_path, exist_ok=True)
extracted = []
for file_path in file_paths:
with tarfile.open(file_path, 'r') as archive:
archive.extractall(target_path)
file_name = os.path.splitext(os.path.basename(file_path))[0]
extracted.append(os.path.join(target_path, file_name))
return extracted | 0.004193 |
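A self-contained sketch that builds a tiny tar archive in a temporary directory and unpacks it with extract_files (file names are illustrative only):

import os, tarfile, tempfile
work = tempfile.mkdtemp()
src = os.path.join(work, 'hello.txt')
with open(src, 'w') as fh:
    fh.write('hi')
archive = os.path.join(work, 'bundle.tar')
with tarfile.open(archive, 'w') as tar:
    tar.add(src, arcname='hello.txt')
print(extract_files([archive], os.path.join(work, 'out')))  # ['<work>/out/bundle']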
def fullConn(self, preCellsTags, postCellsTags, connParam):
        ''' Generates connections between all pre- and post-synaptic cells '''
        from .. import sim
if sim.cfg.verbose: print('Generating set of all-to-all connections (rule: %s) ...' % (connParam['label']))
# get list of params that have a lambda function
paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]
for paramStrFunc in paramsStrFunc:
# replace lambda function (with args as dict of lambda funcs) with list of values
connParam[paramStrFunc[:-4]+'List'] = {(preGid,postGid): connParam[paramStrFunc](**{k:v if isinstance(v, Number) else v(preCellTags,postCellTags) for k,v in connParam[paramStrFunc+'Vars'].items()})
for preGid,preCellTags in preCellsTags.items() for postGid,postCellTags in postCellsTags.items()}
for postCellGid in postCellsTags: # for each postsyn cell
if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids
for preCellGid, preCellTags in preCellsTags.items(): # for each presyn cell
self._addCellConn(connParam, preCellGid, postCellGid) | 0.016736 |
def get_config():
'''Gather and sanity-check volume configuration data'''
volume_config = {}
config = hookenv.config()
errors = False
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
volume_config['ephemeral'] = True
else:
volume_config['ephemeral'] = False
try:
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
except yaml.YAMLError as e:
hookenv.log("Error parsing YAML volume-map: {}".format(e),
hookenv.ERROR)
errors = True
if volume_map is None:
# probably an empty string
volume_map = {}
elif not isinstance(volume_map, dict):
hookenv.log("Volume-map should be a dictionary, not {}".format(
type(volume_map)))
errors = True
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
if volume_config['device'] and volume_config['ephemeral']:
# asked for ephemeral storage but also defined a volume ID
hookenv.log('A volume is defined for this unit, but ephemeral '
'storage was requested', hookenv.ERROR)
errors = True
elif not volume_config['device'] and not volume_config['ephemeral']:
# asked for permanent storage but did not define volume ID
hookenv.log('Ephemeral storage was requested, but there is no volume '
'defined for this unit.', hookenv.ERROR)
errors = True
unit_mount_name = hookenv.local_unit().replace('/', '-')
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
if errors:
return None
return volume_config | 0.000597 |
def make_tensor_value_info(
name, # type: Text
elem_type, # type: int
shape, # type: Optional[Sequence[Union[Text, int]]]
doc_string="", # type: Text
shape_denotation=None, # type: Optional[List[Text]]
): # type: (...) -> ValueInfoProto
"""Makes a ValueInfoProto based on the data type and shape."""
value_info_proto = ValueInfoProto()
value_info_proto.name = name
if doc_string:
value_info_proto.doc_string = doc_string
tensor_type_proto = value_info_proto.type.tensor_type
tensor_type_proto.elem_type = elem_type
tensor_shape_proto = tensor_type_proto.shape
if shape is not None:
# You might think this is a no-op (extending a normal Python
# list by [] certainly is), but protobuf lists work a little
# differently; if a field is never set, it is omitted from the
# resulting protobuf; a list that is explicitly set to be
# empty will get an (empty) entry in the protobuf. This
# difference is visible to our consumers, so make sure we emit
# an empty shape!
tensor_shape_proto.dim.extend([])
if shape_denotation:
if len(shape_denotation) != len(shape):
raise ValueError(
'Invalid shape_denotation. '
'Must be of the same length as shape.')
for i, d in enumerate(shape):
dim = tensor_shape_proto.dim.add()
if d is None:
pass
elif isinstance(d, integer_types):
dim.dim_value = d
elif isinstance(d, text_type):
dim.dim_param = d
else:
raise ValueError(
'Invalid item in shape: {}. '
                    'Needs to be of integer_types or text_type.'.format(d))
if shape_denotation:
dim.denotation = shape_denotation[i]
return value_info_proto | 0.000512 |
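A hedged usage sketch; TensorProto.FLOAT is assumed to be the element-type constant, and the tensor name and shape are illustrative:

from onnx import TensorProto
vi = make_tensor_value_info('X', TensorProto.FLOAT, [None, 'batch', 3])
print(vi.type.tensor_type.shape)   # one unknown dim, one dim_param 'batch', one dim_value 3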
def addReward(self, r=None):
""" A filtered mapping towards performAction of the underlying
environment.
"""
r = self.getReward() if r is None else r
# by default, the cumulative reward is just the sum over the episode
if self.discount:
self.cumulativeReward += power(self.discount, self.samples) * r
else:
self.cumulativeReward += r | 0.004785 |
def start_output (self):
"""Write start of checking info."""
super(HtmlLogger, self).start_output()
header = {
"encoding": self.get_charset_encoding(),
"title": configuration.App,
"body": self.colorbackground,
"link": self.colorlink,
"vlink": self.colorlink,
"alink": self.colorlink,
"url": self.colorurl,
"error": self.colorerror,
"valid": self.colorok,
"warning": self.colorwarning,
}
self.write(HTML_HEADER % header)
self.comment("Generated by %s" % configuration.App)
if self.has_part('intro'):
self.write(u"<h2>"+configuration.App+
"</h2><br/><blockquote>"+
configuration.Freeware+"<br/><br/>"+
(_("Start checking at %s") %
strformat.strtime(self.starttime))+
os.linesep+"<br/>")
self.check_date()
self.flush() | 0.007707 |
def availability(self, availability):
"""Sets the availability of this Product.
:param availability: The availability of this Product.
:type: str
"""
allowed_values = ["available", "comingSoon", "retired"]
if availability is not None and availability not in allowed_values:
raise ValueError(
"Invalid value for `availability` ({0}), must be one of {1}"
.format(availability, allowed_values)
)
self._availability = availability | 0.003697 |
def deserialize_from_text(cls, data, content_type=None):
# type: (Optional[Union[AnyStr, IO]], Optional[str]) -> Any
"""Decode data according to content-type.
Accept a stream of data as well, but will be load at once in memory for now.
If no content-type, will return the string version (not bytes, not stream)
:param data: Input, could be bytes or stream (will be decoded with UTF8) or text
:type data: str or bytes or IO
:param str content_type: The content type.
"""
if hasattr(data, 'read'):
# Assume a stream
data = cast(IO, data).read()
if isinstance(data, bytes):
data_as_str = data.decode(encoding='utf-8-sig')
else:
# Explain to mypy the correct type.
data_as_str = cast(str, data)
# Remove Byte Order Mark if present in string
data_as_str = data_as_str.lstrip(_BOM)
if content_type is None:
return data
if content_type in cls.JSON_MIMETYPES:
try:
return json.loads(data_as_str)
except ValueError as err:
raise DeserializationError("JSON is invalid: {}".format(err), err)
elif "xml" in (content_type or []):
try:
return ET.fromstring(data_as_str)
except ET.ParseError:
# It might be because the server has an issue, and returned JSON with
# content-type XML....
# So let's try a JSON load, and if it's still broken
# let's flow the initial exception
def _json_attemp(data):
try:
return True, json.loads(data)
except ValueError:
return False, None # Don't care about this one
success, json_result = _json_attemp(data)
if success:
return json_result
# If i'm here, it's not JSON, it's not XML, let's scream
# and raise the last context in this block (the XML exception)
# The function hack is because Py2.7 messes up with exception
# context otherwise.
_LOGGER.critical("Wasn't XML not JSON, failing")
raise_with_traceback(DeserializationError, "XML is invalid")
raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) | 0.004014 |
def _get_action(trans):
"""
    Return the action inferred from the transformation `trans`,
    and the parameter that goes with this action.
    An _Action.ADD_MARK goes with a Mark,
    while an _Action.ADD_ACCENT goes with an Accent.
"""
# TODO: VIQR-like convention
mark_action = {
'^': (_Action.ADD_MARK, Mark.HAT),
'+': (_Action.ADD_MARK, Mark.BREVE),
'*': (_Action.ADD_MARK, Mark.HORN),
'-': (_Action.ADD_MARK, Mark.BAR),
}
accent_action = {
'\\': (_Action.ADD_ACCENT, Accent.GRAVE),
'/': (_Action.ADD_ACCENT, Accent.ACUTE),
'?': (_Action.ADD_ACCENT, Accent.HOOK),
'~': (_Action.ADD_ACCENT, Accent.TIDLE),
'.': (_Action.ADD_ACCENT, Accent.DOT),
}
if trans[0] in ('<', '+'):
return _Action.ADD_CHAR, trans[1]
if trans[0] == "_":
return _Action.UNDO, trans[1:]
if len(trans) == 2:
return mark_action[trans[1]]
else:
return accent_action[trans[0]] | 0.001006 |
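A hedged usage sketch, assuming the _Action, Mark, and Accent enums from the surrounding module:

print(_get_action('a^'))   # (_Action.ADD_MARK, Mark.HAT) -- two-char mark transformation
print(_get_action('/'))    # (_Action.ADD_ACCENT, Accent.ACUTE)
print(_get_action('<w'))   # (_Action.ADD_CHAR, 'w')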
def parse_gaf(path_or_buffer, gene_ontology, valid_genes=None,
db=None, ev_codes=None):
"""Parse a GAF 2.1 file containing GO annotations.
Parameters
----------
path_or_buffer : str or buffer
The GAF file.
gene_ontology : `GeneOntology`
The Gene Ontology.
valid_genes : Iterable of str, optional
A list of valid gene names. [None]
db : str, optional
Select only annotations with this "DB"" value. [None]
ev_codes : str or set of str, optional
Select only annotations with this/these evidence codes. [None]
Returns
-------
list of `GOAnnotation`
The list of GO annotations.
"""
#if path == '-':
# path = sys.stdin
assert isinstance(gene_ontology, GeneOntology)
if db is not None:
assert isinstance(db, (str, _oldstr))
if (ev_codes is not None) and ev_codes:
assert isinstance(ev_codes, (str, _oldstr)) or \
isinstance(ev_codes, Iterable)
if isinstance(ev_codes, str):
ev_codes = set([ev_codes])
elif (ev_codes is not None) and ev_codes:
ev_codes = set(ev_codes)
else:
ev_codes = None
# open file, if necessary
if isinstance(path_or_buffer, (str, _oldstr)):
buffer = misc.gzip_open_text(path_or_buffer, encoding='ascii')
else:
buffer = path_or_buffer
if valid_genes is not None:
valid_genes = set(valid_genes)
# use pandas to parse the file quickly
df = pd.read_csv(buffer, sep='\t', comment='!', header=None, dtype=_oldstr)
# replace pandas' NaNs with empty strings
df.fillna('', inplace=True)
# exclude annotations with unknown Gene Ontology terms
all_go_term_ids = set(gene_ontology._term_dict.keys())
sel = df.iloc[:, 4].isin(all_go_term_ids)
logger.info(
'Ignoring %d / %d annotations (%.1f %%) with unknown GO terms.',
(~sel).sum(), sel.size, 100*((~sel).sum()/float(sel.size)))
df = df.loc[sel]
# filter rows for valid genes
if valid_genes is not None:
sel = df.iloc[:, 2].isin(valid_genes)
logger.info(
'Ignoring %d / %d annotations (%.1f %%) with unknown genes.',
(~sel).sum(), sel.size, 100*((~sel).sum()/float(sel.size)))
df = df.loc[sel]
# filter rows for DB value
if db is not None:
sel = (df.iloc[:, 0] == db)
logger.info(
'Excluding %d / %d annotations (%.1f %%) with wrong DB values.',
(~sel).sum(), sel.size, 100*((~sel).sum()/float(sel.size)))
df = df.loc[sel]
# filter rows for evidence value
if ev_codes is not None:
sel = (df.iloc[:, 6].isin(ev_codes))
logger.info(
'Excluding %d / %d annotations (%.1f %%) based on evidence code.',
(~sel).sum(), sel.size, 100*((~sel).sum()/float(sel.size)))
df = df.loc[sel]
# convert each row into a GOAnnotation object
go_annotations = []
for i, l in df.iterrows():
ann = GOAnnotation.from_list(gene_ontology, l.tolist())
go_annotations.append(ann)
logger.info('Read %d GO annotations.', len(go_annotations))
return go_annotations | 0.001248 |
def add_versioned_targets_to_INSTALLED_FILES(target, source, env):
""" An emitter that adds all target files to the list stored in the
_INSTALLED_FILES global variable. This way all installed files of one
scons call will be collected.
"""
global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES
Verbose = False
_INSTALLED_FILES.extend(target)
if Verbose:
print("add_versioned_targets_to_INSTALLED_FILES: target={:r}".format(list(map(str, target))))
symlinks = listShlibLinksToInstall(target[0], source, env)
if symlinks:
SCons.Tool.EmitLibSymlinks(env, symlinks, target[0])
_UNIQUE_INSTALLED_FILES = None
return (target, source) | 0.00292 |
def crick_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
"""Returns the Crick angle for each CA atom in the `Polymer`.
Notes
-----
The final value is in the returned list is `None`, since the angle
calculation requires pairs of points on both the primitive and
reference_axis.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If `True`, tags the `Polymer` with the reference axis coordinates
and each Residue with its Crick angle. Crick angles are stored
at the Residue level, but are calculated using the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
cr_angles : list(float)
The crick angles in degrees for each CA atom of the Polymer.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length.
"""
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points"
" as the Polymer primitive.")
prim_cas = p.primitive.coordinates
p_cas = p.get_reference_coords()
ref_points = reference_axis.coordinates
cr_angles = [
dihedral(ref_points[i], prim_cas[i], prim_cas[i + 1], p_cas[i])
for i in range(len(prim_cas) - 1)]
cr_angles.append(None)
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = 'crick_angle_{0}'.format(reference_axis_name)
for m, c in zip(p._monomers, cr_angles):
m.tags[monomer_tag_name] = c
return cr_angles | 0.000531 |
def order_lots(id_or_ins, amount, price=None, style=None):
"""
    Place a buy/sell order for the given number of lots. An order style can be
    passed in as a parameter if needed; if the style is omitted, a market order
    is used by default.
    :param id_or_ins: the instrument to trade
    :type id_or_ins: :class:`~Instrument` object | `str`
    :param int amount: order quantity in lots; a positive number means buy, a negative number means sell. The share count is derived from the per-lot size (e.g. 100 shares per lot for China A-shares).
    :param float price: order price; defaults to None, which means a :class:`~MarketOrder`. This parameter mainly serves as a shorthand for the `style` parameter.
    :param style: order style; defaults to a market order. Currently supported order types are :class:`~LimitOrder` and :class:`~MarketOrder`
    :type style: `OrderStyle` object
    :return: :class:`~Order` object | None
    :example:
    .. code-block:: python
        # Buy 20 lots of Ping An Bank stock with a market order:
        order_lots('000001.XSHE', 20)
        # Buy 10 lots of Ping An Bank stock with a limit order at ¥10:
        order_lots('000001.XSHE', 10, style=LimitOrder(10))
"""
order_book_id = assure_stock_order_book_id(id_or_ins)
round_lot = int(Environment.get_instance().get_instrument(order_book_id).round_lot)
style = cal_style(price, style)
return order_shares(id_or_ins, amount * round_lot, style=style) | 0.004859 |
def from_str(cls, s):
# type: (Union[Text, bytes]) -> FmtStr
r"""
Return a FmtStr representing input.
        The str() of a FmtStr is guaranteed to produce the same FmtStr.
Other input with escape sequences may not be preserved.
>>> fmtstr("|"+fmtstr("hey", fg='red', bg='blue')+"|")
'|'+on_blue(red('hey'))+'|'
>>> fmtstr('|\x1b[31m\x1b[44mhey\x1b[49m\x1b[39m|')
'|'+on_blue(red('hey'))+'|'
"""
if '\x1b[' in s:
try:
tokens_and_strings = parse(s)
except ValueError:
return FmtStr(Chunk(remove_ansi(s)))
else:
chunks = []
cur_fmt = {}
for x in tokens_and_strings:
if isinstance(x, dict):
cur_fmt.update(x)
elif isinstance(x, (bytes, unicode)):
atts = parse_args('', dict((k, v)
for k, v in cur_fmt.items()
if v is not None))
chunks.append(Chunk(x, atts=atts))
else:
raise Exception("logic error")
return FmtStr(*chunks)
else:
return FmtStr(Chunk(s)) | 0.002262 |
def delete_key(key_name, region=None, key=None, keyid=None, profile=None):
'''
Deletes a key. Always returns True
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.delete_key mykey
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
key = conn.delete_key_pair(key_name)
log.debug("the key to return is : %s", key)
return key
except boto.exception.BotoServerError as e:
log.debug(e)
return False | 0.001949 |
def pretty_print_str(self):
'''
Create a string to pretty-print this trie to standard output.
'''
retval = ''
# dfs
todo = [self.root]
while todo:
current = todo.pop()
for char in reversed(sorted(current.keys())):
todo.append(current[char])
indent = ' ' * (current.depth * 2)
retval += indent + current.__unicode__() + '\n'
return retval.rstrip('\n') | 0.004184 |
def setup(self, mujoco_objects, table_top_offset, table_size):
"""
Args:
            mujoco_objects(MujocoObject * n_obj): objects to be placed
            table_top_offset(float * 3): location of table top center
            table_size(float * 3): x, y, z full size of the table
"""
self.mujoco_objects = mujoco_objects
self.n_obj = len(self.mujoco_objects)
self.table_top_offset = table_top_offset
self.table_size = table_size | 0.004175 |
def to_unicode_or_none(value):
"""Converts C char arrays to unicode and C NULL values to None.
C char arrays are decoded from UTF-8.
"""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL') | 0.002857 |
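A rough sketch, assuming ffi is the module-level cffi.FFI() instance that the snippet closes over:

buf = ffi.new("char[]", "caf\u00e9".encode("utf-8"))
print(to_unicode_or_none(buf))        # 'café'
print(to_unicode_or_none(ffi.NULL))   # None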
def error(self, message):
'''Suppress default exit behavior'''
message = self._remessage_invalid_subparser(message)
raise utils.UsageError(message) | 0.011696 |
def has_predecessor(self, graph, dest, orig, branch, turn, tick, *, forward=None):
"""Return whether an edge connects the destination to the origin at the given time.
Doesn't require the edge's index, which makes it slower than retrieving a
particular edge.
"""
if forward is None:
forward = self.db._forward
return orig in self._get_origcache(graph, dest, branch, turn, tick, forward=forward) | 0.013187 |
def save(self, file_tag='2016', add_header='N'):
"""
save table to folder in appropriate files
NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE
"""
fname = self.get_filename(file_tag)
with open(fname, 'a') as f:
if add_header == 'Y':
f.write(self.format_hdr())
for e in self.table:
f.write(e.format_csv()) | 0.011494 |
def create_job(self, phases, name=None, input=None):
"""CreateJob
https://apidocs.joyent.com/manta/api.html#CreateJob
"""
log.debug('CreateJob')
path = '/%s/jobs' % self.account
body = {"phases": phases}
if name:
body["name"] = name
if input:
body["input"] = input
headers = {"Content-Type": "application/json"}
res, content = self._request(path,
"POST",
body=json.dumps(body),
headers=headers)
if res["status"] != '201':
raise errors.MantaAPIError(res, content)
location = res["location"]
assert res["location"]
job_id = res["location"].rsplit('/', 1)[-1]
return job_id | 0.002389 |
def _total_microsec(t1, t2):
"""
Calculate difference between two datetime stamps in microseconds.
:type t1: :class: `datetime.datetime`
:type t2: :class: `datetime.datetime`
:return: int
.. rubric:: Example
>>> print(_total_microsec(UTCDateTime(2013, 1, 1).datetime,
... UTCDateTime(2014, 1, 1).datetime))
-31536000000000
"""
td = t1 - t2
return (td.seconds + td.days * 24 * 3600) * 10 ** 6 + td.microseconds | 0.002075 |
def append(self, item):
"""
        Add the given item as a child
        """
        if self.url:
            raise TypeError('Menu items with URL cannot have children')
# Look for already present common node
if not item.is_leaf():
for current_item in self.items:
if item.name == current_item.name:
for children in item.items:
current_item.append(children)
return
# First insertion
self.items.append(item) | 0.00369 |
def fail(self):
"""Fail a vector."""
if self.failed is True:
raise AttributeError("Cannot fail {} - it has already failed.".format(self))
else:
self.failed = True
self.time_of_death = timenow()
for t in self.transmissions():
t.fail() | 0.009317 |