text | score
---|---|
def _resolve(self, path, method, urlargs=None):
'''Resolve a path and return a ``(handler, urlargs)`` tuple or
``None`` if the path could not be resolved.
'''
match = self.route.match(path)
if match is None:
if not self.route.is_leaf: # no match
return
elif '__remaining__' in match:
path = match.pop('__remaining__')
urlargs = update_args(urlargs, match)
else:
handler = getattr(self, method, None)
if handler is None:
raise MethodNotAllowed
response_wrapper = self.response_wrapper
if response_wrapper:
handler = partial(response_wrapper, handler)
return Handler(self, handler, update_args(urlargs, match))
#
for handler in self.routes:
view_args = handler._resolve(path, method, urlargs)
if view_args is None:
continue
return view_args | 0.001986 |
def _debug_string(self, debug, data):
"""
Annotate a frame's data if debug is True.
"""
if not debug:
return data
if self.command in [
SLOT.CONFIG,
SLOT.CONFIG2,
SLOT.UPDATE1,
SLOT.UPDATE2,
SLOT.SWAP,
]:
# annotate according to config_st (see ykdef.h)
if yubico_util.ord_byte(data[-1]) == 0x80:
return (data, "FFFFFFF") # F = Fixed data (16 bytes)
if yubico_util.ord_byte(data[-1]) == 0x81:
return (data, "FFFFFFF")
if yubico_util.ord_byte(data[-1]) == 0x82:
return (data, "FFUUUUU") # U = UID (6 bytes)
if yubico_util.ord_byte(data[-1]) == 0x83:
return (data, "UKKKKKK") # K = Key (16 bytes)
if yubico_util.ord_byte(data[-1]) == 0x84:
return (data, "KKKKKKK")
if yubico_util.ord_byte(data[-1]) == 0x85:
return (data, "KKKAAAA") # A = Access code to set (6 bytes)
if yubico_util.ord_byte(data[-1]) == 0x86:
return (data, "AAlETCr") # l = Length of fixed field (1 byte)
# E = extFlags (1 byte)
# T = tktFlags (1 byte)
# C = cfgFlags (1 byte)
# r = RFU (2 bytes)
if yubico_util.ord_byte(data[-1]) == 0x87:
return (data, "rCRaaaa") # CR = CRC16 checksum (2 bytes)
# a = Access code to use (6 bytes)
if yubico_util.ord_byte(data[-1]) == 0x88:
return (data, 'aa')
# after payload
if yubico_util.ord_byte(data[-1]) == 0x89:
return (data, " Scr")
return (data, '') | 0.006296 |
def collect_data(bids_dir, participant_label, task=None, echo=None,
bids_validate=True):
"""
Uses pybids to retrieve the input data for a given participant
>>> bids_root, _ = collect_data(str(datadir / 'ds054'), '100185',
... bids_validate=False)
>>> bids_root['fmap'] # doctest: +ELLIPSIS
['.../ds054/sub-100185/fmap/sub-100185_magnitude1.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_magnitude2.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_phasediff.nii.gz']
>>> bids_root['bold'] # doctest: +ELLIPSIS
['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_bold.nii.gz']
>>> bids_root['sbref'] # doctest: +ELLIPSIS
['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_sbref.nii.gz']
>>> bids_root['t1w'] # doctest: +ELLIPSIS
['.../ds054/sub-100185/anat/sub-100185_T1w.nii.gz']
>>> bids_root['t2w'] # doctest: +ELLIPSIS
[]
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
queries = {
'fmap': {'datatype': 'fmap'},
'bold': {'datatype': 'func', 'suffix': 'bold'},
'sbref': {'datatype': 'func', 'suffix': 'sbref'},
'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
't2w': {'datatype': 'anat', 'suffix': 'T2w'},
't1w': {'datatype': 'anat', 'suffix': 'T1w'},
'roi': {'datatype': 'anat', 'suffix': 'roi'},
}
if task:
queries['bold']['task'] = task
if echo:
queries['bold']['echo'] = echo
subj_data = {
dtype: sorted(layout.get(return_type='file', subject=participant_label,
extensions=['nii', 'nii.gz'], **query))
for dtype, query in queries.items()}
# Special case: multi-echo BOLD, grouping echos
if any(['_echo-' in bold for bold in subj_data['bold']]):
subj_data['bold'] = group_multiecho(subj_data['bold'])
return subj_data, layout | 0.001065 |
def get_alt_az(utc_time, lon, lat):
"""Return sun altitude and azimuth from *utc_time*, *lon*, and *lat*.
lon, lat in degrees.
The returned altitude and azimuth are both in radians (they come straight from
``np.arcsin`` and ``np.arctan2``).
"""
lon = np.deg2rad(lon)
lat = np.deg2rad(lat)
ra_, dec = sun_ra_dec(utc_time)
h__ = _local_hour_angle(utc_time, lon, ra_)
return (np.arcsin(np.sin(lat) * np.sin(dec) +
np.cos(lat) * np.cos(dec) * np.cos(h__)),
np.arctan2(-np.sin(h__), (np.cos(lat) * np.tan(dec) -
np.sin(lat) * np.cos(h__)))) | 0.001698 |
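Both returned values come straight from ``np.arcsin`` and ``np.arctan2``, so they are in radians. A minimal usage sketch (the timestamp and coordinates below are arbitrary illustrative values, and the helpers used above are assumed importable alongside ``get_alt_az``):

```python
# Minimal sketch: call get_alt_az and convert the returned radians to degrees.
# The timestamp and coordinates are arbitrary illustrative values.
from datetime import datetime
import numpy as np

alt, az = get_alt_az(datetime(2020, 6, 21, 12, 0, 0), lon=16.0, lat=58.0)
print("altitude (deg):", np.rad2deg(alt))
print("azimuth (deg):", np.rad2deg(az))
```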
def compare_list_iter(propval_a, propval_b, fs_a=None, fs_b=None,
options=None):
"""Generator for comparing 'simple' lists when they are encountered. This
does not currently recurse further. Arguments are as per other
``compare_``\ *X* functions.
"""
if fs_a is None:
fs_a = FieldSelector(tuple())
fs_b = FieldSelector(tuple())
if not options:
options = DiffOptions()
propvals = dict(a=propval_a, b=propval_b)
values = dict()
indices = dict()
for x in "a", "b":
propval_x = propvals[x]
vals = values[x] = set()
rev_key = indices[x] = dict()
seen = collections.Counter()
for i, v in collection_generator(propval_x):
v = options.normalize_item(
v, propval_a if options.duck_type else propval_x
)
if not v.__hash__:
v = repr(v)
if v is not _nothing or not options.ignore_empty_slots:
vals.add((v, seen[v]))
rev_key[(v, seen[v])] = i
seen[v] += 1
removed = values['a'] - values['b']
added = values['b'] - values['a']
if options.unchanged or options.moved:
unchanged = values['a'] & values['b']
for v, seq in unchanged:
a_idx = indices['a'][v, seq]
b_idx = indices['b'][v, seq]
if options.moved and a_idx != b_idx:
yield DiffInfo(
diff_type=DiffTypes.MOVED,
base=fs_a + [a_idx],
other=fs_b + [b_idx],
)
elif options.unchanged:
yield DiffInfo(
diff_type=DiffTypes.NO_CHANGE,
base=fs_a + [a_idx],
other=fs_b + [b_idx],
)
removed_idx = set(indices['a'][v, seq] for v, seq in removed)
added_idx = set(indices['b'][v, seq] for v, seq in added)
modified_idx = set(removed_idx.intersection(added_idx))
for v, seq in removed:
a_key = indices['a'][v, seq]
if a_key in modified_idx:
continue
selector = fs_a + [a_key]
yield DiffInfo(
diff_type=DiffTypes.REMOVED,
base=selector,
other=fs_b,
)
for v, seq in added:
b_key = indices['b'][v, seq]
if b_key in modified_idx:
continue
selector = fs_b + [b_key]
yield DiffInfo(
diff_type=DiffTypes.ADDED,
base=fs_a,
other=selector,
)
for idx in modified_idx:
yield DiffInfo(
diff_type=DiffTypes.MODIFIED,
base=fs_a + [idx],
other=fs_b + [idx],
) | 0.000729 |
def event(self, event): # pylint: disable-msg=R0201
"""Handle a stream event.
Called when connection state is changed.
Should not be called with self.lock acquired!
"""
event.stream = self
logger.debug(u"Stream event: {0}".format(event))
self.settings["event_queue"].put(event)
return False | 0.008427 |
def get_all(self, key, fallback=None):
"""returns all header values for given key"""
if key in self.headers:
value = self.headers[key]
else:
value = fallback or []
return value | 0.008621 |
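A small illustration of the fallback behaviour; the ``SimpleNamespace`` stand-in below is hypothetical and only assumes the owning object exposes a plain ``headers`` dict:

```python
# Hedged illustration: call the function directly with a stand-in object
# that has a plain `headers` dict, to show the fallback behaviour.
from types import SimpleNamespace

msg = SimpleNamespace(headers={'Set-Cookie': ['a=1', 'b=2']})
print(get_all(msg, 'Set-Cookie'))          # ['a=1', 'b=2']
print(get_all(msg, 'X-Missing'))           # [] (default fallback)
print(get_all(msg, 'X-Missing', ['n/a']))  # ['n/a']
```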
def ServiceWorker_deliverPushMessage(self, origin, registrationId, data):
"""
Function path: ServiceWorker.deliverPushMessage
Domain: ServiceWorker
Method name: deliverPushMessage
Parameters:
Required arguments:
'origin' (type: string) -> No description
'registrationId' (type: string) -> No description
'data' (type: string) -> No description
No return value.
"""
assert isinstance(origin, (str,)
), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type(
origin)
assert isinstance(registrationId, (str,)
), "Argument 'registrationId' must be of type '['str']'. Received type: '%s'" % type(
registrationId)
assert isinstance(data, (str,)
), "Argument 'data' must be of type '['str']'. Received type: '%s'" % type(
data)
subdom_funcs = self.synchronous_command('ServiceWorker.deliverPushMessage',
origin=origin, registrationId=registrationId, data=data)
return subdom_funcs | 0.043566 |
def simulate(protocol_file,
propagate_logs=False,
log_level='warning') -> List[Mapping[str, Any]]:
"""
Simulate the protocol itself.
This is a one-stop function to simulate a protocol, whether python or json,
no matter the api version, from external (i.e. not bound up in other
internal server infrastructure) sources.
To simulate an opentrons protocol from other places, pass in a file like
object as protocol_file; this function either returns (if the simulation
has no problems) or raises an exception.
To call from the command line use either the autogenerated entrypoint
``opentrons_simulate`` (``opentrons_simulate.exe``, on windows) or
``python -m opentrons.simulate``.
The return value is the run log, a list of dicts that represent the
commands executed by the robot. Each dict has the following keys:
- ``level``: The depth at which this command is nested - if this is an
aspirate inside a mix inside a transfer, for instance,
it would be 3.
- ``payload``: The command, its arguments, and how to format its text.
For more specific details see
:py:mod:`opentrons.commands`. To format a message from
a payload do ``payload['text'].format(**payload)``.
- ``logs``: Any log messages that occurred during execution of this
command, as a logging.LogRecord
:param file-like protocol_file: The protocol file to simulate.
:param propagate_logs: Whether this function should allow logs from the
Opentrons stack to propagate up to the root handler.
This can be useful if you're integrating this
function in a larger application, but most logs that
occur during protocol simulation are best associated
with the actions in the protocol that cause them.
:type propagate_logs: bool
:param log_level: The level of logs to capture in the runlog
:type log_level: 'debug', 'info', 'warning', or 'error'
:returns List[Dict[str, Dict[str, Any]]]: A run log for user output.
"""
stack_logger = logging.getLogger('opentrons')
stack_logger.propagate = propagate_logs
contents = protocol_file.read()
if opentrons.config.feature_flags.use_protocol_api_v2():
try:
execute_args = {'protocol_json': json.loads(contents)}
except json.JSONDecodeError:
execute_args = {'protocol_code': contents}
context = opentrons.protocol_api.contexts.ProtocolContext()
context.home()
scraper = CommandScraper(stack_logger, log_level, context.broker)
execute_args.update({'simulate': True,
'context': context})
opentrons.protocol_api.execute.run_protocol(**execute_args)
else:
try:
proto = json.loads(contents)
except json.JSONDecodeError:
proto = contents
opentrons.robot.disconnect()
scraper = CommandScraper(stack_logger, log_level,
opentrons.robot.broker)
if isinstance(proto, dict):
opentrons.protocols.execute_protocol(proto)
else:
exec(proto, {})
return scraper.commands | 0.000294 |
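A minimal sketch of consuming the run log exactly as the docstring describes; the protocol path is hypothetical:

```python
# Hypothetical protocol path; each entry is formatted as the docstring says:
# payload['text'].format(**payload), indented by its nesting `level`.
with open('my_protocol.py') as protocol_file:
    run_log = simulate(protocol_file, log_level='warning')

for entry in run_log:
    indent = '    ' * entry['level']
    print(indent + entry['payload']['text'].format(**entry['payload']))
```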
def request_timestamp(self):
"""
The timestamp of the request in ISO8601 YYYYMMDD'T'HHMMSS'Z' format.
If this is not available in the query parameters or headers, or the
value is not a valid format for AWS SigV4, an AttributeError exception
is raised.
"""
amz_date = self.query_parameters.get(_x_amz_date)
if amz_date is not None:
amz_date = amz_date[0]
else:
amz_date = self.headers.get(_x_amz_date)
if amz_date is None:
date = self.headers.get(_date)
if date is None:
raise AttributeError("Date was not passed in the request")
# This isn't really valid -- seems to be a bug in the AWS
# documentation.
if _iso8601_timestamp_regex.match(date):
amz_date = date # pragma: nocover
else:
# Parse this as an HTTP date and reformulate it.
amz_date = (datetime.strptime(date, _http_date_format)
.strftime("%Y%m%dT%H%M%SZ"))
if not _iso8601_timestamp_regex.match(amz_date):
raise AttributeError("X-Amz-Date parameter is not a valid ISO8601 "
"string: %r" % amz_date)
return amz_date | 0.002214 |
def sourceDirValidationError(dirname, component_name):
''' validate source directory names in components '''
if dirname == component_name:
return 'Module %s public include directory %s should not contain source files' % (component_name, dirname)
elif dirname.lower() in ('source', 'src') and dirname != 'source':
return 'Module %s has non-standard source directory name: "%s" should be "source"' % (component_name, dirname)
elif isPotentialTestDir(dirname) and dirname != 'test':
return 'Module %s has non-standard test directory name: "%s" should be "test"' % (component_name, dirname)
elif not Source_Dir_Regex.match(dirname):
corrected = Source_Dir_Invalid_Regex.sub('', dirname.lower())
if not corrected:
corrected = 'source'
return 'Module %s has non-standard source directory name: "%s" should be "%s"' % (component_name, dirname, corrected)
else:
return None | 0.005225 |
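Two calls whose results follow directly from the first two branches above (assuming the module-level regexes and ``isPotentialTestDir`` are defined as in the original module):

```python
# Both outcomes follow from the first two branches of the function above.
print(sourceDirValidationError('mymodule', 'mymodule'))
# -> 'Module mymodule public include directory mymodule should not contain source files'
print(sourceDirValidationError('src', 'mymodule'))
# -> 'Module mymodule has non-standard source directory name: "src" should be "source"'
```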
def complete(self):
"""
Determine if the analyses of the strains are complete e.g. there are no missing GDCS genes, and the
sample.general.bestassemblyfile != 'NA'
"""
# Boolean to store the completeness of the analyses
allcomplete = True
# Clear the list of samples that still require more sequence data
self.incomplete = list()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
try:
# If the sample has been tagged as incomplete, only add it to the complete metadata list if the
# pipeline is on its final iteration
if sample.general.incomplete:
if self.final:
self.completemetadata.append(sample)
else:
sample.general.complete = False
allcomplete = False
self.incomplete.append(sample.name)
except AttributeError:
sample.general.complete = True
self.completemetadata.append(sample)
else:
if self.final:
self.completemetadata.append(sample)
else:
sample.general.complete = False
allcomplete = False
self.incomplete.append(sample.name)
# If all the samples are complete, set the global variable for run completeness to True
if allcomplete:
self.analysescomplete = True | 0.003676 |
def setProperty(self, name, value):
'''
Sets one of the supported property values of the speech engine listed
above. If a value is invalid, attempts to clip it / coerce so it is
valid before giving up and firing an exception.
@param name: Property name
@type name: str
@param value: Property value
@type value: object
@raise KeyError: When the property name is unknown
@raise ValueError: When the value cannot be coerced to fit the property
'''
if name == 'voice':
v = filter(lambda v: v.id == value, self._config['voices'])
self._config['voice'] = v[0]
elif name == 'rate':
self._config['rate'] = value
elif name == 'volume':
self._config['volume'] = value
else:
raise KeyError('unknown property %s' % name) | 0.00225 |
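A hedged usage sketch: ``engine`` is a hypothetical object exposing this ``setProperty`` method, and only the property names handled above are accepted:

```python
# `engine` is a hypothetical object exposing the setProperty method above.
engine.setProperty('rate', 150)    # stored directly in the driver config
engine.setProperty('volume', 0.8)  # stored directly in the driver config
# engine.setProperty('pitch', 1.0) # any unknown name raises KeyError
```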
def eval_basis(self, x, regularize=True):
"""
basis_mat = C.eval_basis(x)
Evaluates self's basis functions on x and returns them stacked
in a matrix. basis_mat[i,j] gives basis function i (formed by
multiplying basis functions) evaluated at x[j,:].
"""
# Make object of same shape as self.basis, fill in with evals of each individual basis factor.
# Make object of same shape as diag(coef_cov), fill in with products of those evals.
# Reshape and return.
if regularize:
x = regularize_array(x)
out = zeros(self.shape + (x.shape[0],), dtype=float, order='F')
# Evaluate the basis factors
basis_factors = []
for i in xrange(self.ndim):
basis_factors.append([])
for j in xrange(self.n_per_dim[i]):
basis_factors[i].append(self.basis[i][j](x, **self.params))
out = ones((self.n, x.shape[0]), dtype=float)
out_reshaped = out.reshape(self.shape + (x.shape[0],))
for ind in ndindex(self.shape):
for dim in xrange(self.ndim):
out_reshaped[ind] *= basis_factors[dim][ind[dim]]
return out | 0.003311 |
def batchInsert(self, itemType, itemAttributes, dataRows):
"""
Create multiple items in the store without loading
corresponding Python objects into memory.
the items' C{stored} callback will not be called.
Example::
myData = [(37, u"Fred", u"Wichita"),
(28, u"Jim", u"Fresno"),
(43, u"Betty", u"Dubuque")]
myStore.batchInsert(FooItem,
[FooItem.age, FooItem.name, FooItem.city],
myData)
@param itemType: an Item subclass to create instances of.
@param itemAttributes: an iterable of attributes on the Item subclass.
@param dataRows: an iterable of iterables, each the same
length as C{itemAttributes} and containing data corresponding
to each attribute in it.
@return: None.
"""
class FakeItem:
pass
_NEEDS_DEFAULT = object() # token for lookup failure
fakeOSelf = FakeItem()
fakeOSelf.store = self
sql = itemType._baseInsertSQL(self)
indices = {}
schema = [attr for (name, attr) in itemType.getSchema()]
for i, attr in enumerate(itemAttributes):
indices[attr] = i
for row in dataRows:
oid = self.store.executeSchemaSQL(
_schema.CREATE_OBJECT, [self.store.getTypeID(itemType)])
insertArgs = [oid]
for attr in schema:
i = indices.get(attr, _NEEDS_DEFAULT)
if i is _NEEDS_DEFAULT:
pyval = attr.default
else:
pyval = row[i]
dbval = attr._convertPyval(fakeOSelf, pyval)
insertArgs.append(dbval)
self.executeSQL(sql, insertArgs) | 0.00163 |
def remove_data(self, request, pk=None):
"""Remove data from collection."""
collection = self.get_object()
if 'ids' not in request.data:
return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST)
for data_id in request.data['ids']:
collection.data.remove(data_id)
return Response() | 0.007853 |
def diff_time(t1, t2):
"""
Calculates datetime.timedelta between two datetime.time values.
:param t1: First time
:type t1: datetime.time
:param t2: Second time
:type t2: datetime.time
:return: Differences between t1 and t2 or None when t1 or t2 is None
:rtype: datetime.timedelta/None
:raise: ValueError when t1 or t2 is not datetime.time or None
"""
if t1 is None or t2 is None:
return None
if not isinstance(t1, time):
raise ValueError('"t1" must be a datetime.time')
if not isinstance(t2, time):
raise ValueError('"t2" must be a datetime.time')
dt1 = datetime(1, 1, 1,
t1.hour, t1.minute, t1.second, t1.microsecond, t1.tzinfo)
dt2 = datetime(1, 1, 1,
t2.hour, t2.minute, t2.second, t2.microsecond, t2.tzinfo)
return dt1 - dt2 | 0.001167 |
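A few quick examples, using the same ``datetime.time`` type the function expects:

```python
from datetime import time

print(diff_time(time(12, 30), time(11, 15)))  # 1:15:00
print(diff_time(time(11, 15), time(12, 30)))  # -1 day, 22:45:00 (negative timedelta)
print(diff_time(None, time(12, 30)))          # None
```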
def _desy_bookkeeping2marc(self, key, value):
"""Populate the ``595_D`` MARC field.
Also populates the ``035`` MARC field through side effects.
"""
if 'identifier' not in value:
return {
'a': value.get('expert'),
'd': value.get('date'),
's': value.get('status'),
}
self.setdefault('035', []).append({
'9': 'DESY',
'z': value['identifier']
}) | 0.002299 |
def load_udata_commands(self, ctx):
'''
Load udata commands from:
- `udata.commands.*` module
- known internal modules with commands
- plugins exporting a `udata.commands` entrypoint
'''
if self._udata_commands_loaded:
return
# Load all commands submodules
pattern = os.path.join(os.path.dirname(__file__), '[!_]*.py')
for filename in iglob(pattern):
module = os.path.splitext(os.path.basename(filename))[0]
try:
__import__('udata.commands.{0}'.format(module))
except Exception as e:
error('Unable to import {0}'.format(module), e)
# Load all core modules commands
for module in MODULES_WITH_COMMANDS:
try:
__import__('udata.{0}.commands'.format(module))
except Exception as e:
error('Unable to import {0}'.format(module), e)
# Load commands from entry points for enabled plugins
app = ctx.ensure_object(ScriptInfo).load_app()
entrypoints.get_enabled('udata.commands', app)
# Ensure loading happens once
self._udata_commands_loaded = True | 0.001646 |
def check_vip_ip(self, ip, id_evip):
"""
Get a Ipv4 or Ipv6 for Vip request
:param ip: IPv4 or Ipv6. 'xxx.xxx.xxx.xxx or xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx'
:return: Dictionary with the following structure:
::
{'ip': {'ip': < ip - octs for ipv4, blocks for ipv6 - >,
'id': <id>,
'network4 or network6'}}.
:raise IpNaoExisteError: Ipv4 or Ipv6 not found.
:raise EnvironemntVipNotFoundError: Vip environment not found.
:raise IPNaoDisponivelError: Ip not available for Vip Environment.
:raise UserNotAuthorizedError: User dont have permission to perform operation.
:raise InvalidParameterError: Ip string or vip environment is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
ip_map = dict()
ip_map['ip'] = ip
ip_map['id_evip'] = id_evip
url = "ip/checkvipip/"
code, xml = self.submit({'ip_map': ip_map}, 'POST', url)
return self.response(code, xml) | 0.004344 |
def list(self, kind, tag_slug, cur_p=''):
'''
Based on def view_cat_new(self, cat_slug, cur_p = '') in cat_handler.py.
'''
# The code below filters by keyword; do not enable it unless the site has a large amount of content.
# Todo:
# if self.get_current_user():
# redisvr.sadd(config.redis_kw + self.userinfo.user_name, tag_slug)
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MPost2Label.total_number(tag_slug, kind) / CMS_CFG['list_num'])
tag_info = MLabel.get_by_slug(tag_slug)
if tag_info:
tag_name = tag_info.name
else:
tag_name = 'Label search results'
kwd = {'tag_name': tag_name,
'tag_slug': tag_slug,
'title': tag_name,
'current_page': current_page_number,
'router': router_post[kind],
'kind': kind
}
the_list_file = './templates/list/label_{kind}.html'.format(kind=kind)
if os.path.exists(the_list_file):
tmpl = 'list/label_{kind}.html'.format(kind=kind)
else:
tmpl = 'list/label.html'
self.render(tmpl,
infos=MPost2Label.query_pager_by_slug(
tag_slug,
kind=kind,
current_page_num=current_page_number
),
kwd=kwd,
userinfo=self.userinfo,
pager=self.gen_pager(kind, tag_slug, pager_num, current_page_number),
cfg=CMS_CFG) | 0.002952 |
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
i_key = self._instance_key(instance)
if self.in_compatibility_mode(instance):
if instance.get('all_metrics', False):
return available_metrics
wanted_metrics = []
# Get only the basic metrics
for counter_id in available_metrics:
# No cache yet, skip it for now
if not self.metadata_cache.contains(i_key, counter_id):
self.log.debug(
"No metadata found for counter {}, will not collect it".format(ensure_unicode(counter_id))
)
continue
metadata = self.metadata_cache.get_metadata(i_key, counter_id)
if metadata.get('name') in BASIC_METRICS:
wanted_metrics.append(vim.PerformanceManager.MetricId(counterId=counter_id, instance="*"))
return wanted_metrics
else:
# The metadata cache contains only metrics of the desired level, so use it to filter the metrics to keep
return [
vim.PerformanceManager.MetricId(counterId=counter_id, instance="*")
for counter_id in available_metrics
if self.metadata_cache.contains(i_key, counter_id)
] | 0.004673 |
def is_namespace_valid( namespace_id ):
"""
Is a namespace ID valid?
>>> is_namespace_valid('abcd')
True
>>> is_namespace_valid('+abcd')
False
>>> is_namespace_valid('abc.def')
False
>>> is_namespace_valid('.abcd')
False
>>> is_namespace_valid('abcdabcdabcdabcdabcd')
False
>>> is_namespace_valid('abcdabcdabcdabcdabc')
True
"""
if not is_b40( namespace_id ) or "+" in namespace_id or namespace_id.count(".") > 0:
return False
if len(namespace_id) == 0 or len(namespace_id) > LENGTHS['blockchain_id_namespace_id']:
return False
return True | 0.011111 |
def _send_response(
self, environ, start_response, root_res, success_code, error_list
):
"""Send WSGI response (single or multistatus).
- If error_list is None or [], then <success_code> is send as response.
- If error_list contains a single error with a URL that matches root_res,
then this error is returned.
- If error_list contains more than one error, then '207 Multi-Status' is
returned.
"""
assert success_code in (HTTP_CREATED, HTTP_NO_CONTENT, HTTP_OK)
if not error_list:
# Status OK
return util.send_status_response(environ, start_response, success_code)
if len(error_list) == 1 and error_list[0][0] == root_res.get_href():
# Only one error that occurred on the root resource
return util.send_status_response(environ, start_response, error_list[0][1])
# Multiple errors, or error on one single child
multistatusEL = xml_tools.make_multistatus_el()
for refurl, e in error_list:
# assert refurl.startswith("http:")
assert refurl.startswith("/")
assert isinstance(e, DAVError)
responseEL = etree.SubElement(multistatusEL, "{DAV:}response")
etree.SubElement(responseEL, "{DAV:}href").text = refurl
etree.SubElement(responseEL, "{DAV:}status").text = "HTTP/1.1 {}".format(
get_http_status_string(e)
)
return util.send_multi_status_response(environ, start_response, multistatusEL) | 0.005754 |
def edges(s, edges, alpha=1.0, weighted=False, directed=False):
""" Visualization of the edges in a network.
"""
p = s._ctx.BezierPath()
if directed and s.stroke:
pd = s._ctx.BezierPath()
if weighted and s.fill:
pw = [s._ctx.BezierPath() for i in range(11)]
# Draw the edges in a single BezierPath for speed.
# Weighted edges are divided into ten BezierPaths,
# depending on their weight rounded between 0 and 10.
if len(edges) == 0: return
for e in edges:
try: s2 = e.node1.graph.styles[e.node1.style]
except: s2 = s
if s2.edge:
s2.edge(s2, p, e, alpha)
if directed and s.stroke:
s2.edge_arrow(s2, pd, e, radius=10)
if weighted and s.fill:
s2.edge(s2, pw[int(e.weight*10)], e, alpha)
s._ctx.autoclosepath(False)
s._ctx.nofill()
s._ctx.nostroke()
# All weighted edges use the default fill.
if weighted and s.fill:
r = e.node1.__class__(None).r
s._ctx.stroke(
s.fill.r,
s.fill.g,
s.fill.b,
s.fill.a * 0.65 * alpha
)
for w in range(1, len(pw)):
s._ctx.strokewidth(r*w*0.1)
s._ctx.drawpath(pw[w].copy())
# All edges use the default stroke.
if s.stroke:
s._ctx.strokewidth(s.strokewidth)
s._ctx.stroke(
s.stroke.r,
s.stroke.g,
s.stroke.b,
s.stroke.a * 0.65 * alpha
)
s._ctx.drawpath(p.copy())
if directed and s.stroke:
#clr = s._ctx.stroke().copy()
clr=s._ctx.color(
s.stroke.r,
s.stroke.g,
s.stroke.b,
s.stroke.a * 0.65 * alpha
)
clr.a *= 1.3
s._ctx.stroke(clr)
s._ctx.drawpath(pd.copy())
for e in edges:
try: s2 = e.node1.graph.styles[e.node1.style]
except: s2 = s
if s2.edge_label:
s2.edge_label(s2, e, alpha) | 0.023434 |
def create_directory(self, filename):
"""Create a subdirectory in the temporary directory."""
path = os.path.join(self.path, filename)
makedirs(path)
return path | 0.010363 |
def _analyze_states(state: GlobalState) -> List[Issue]:
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
call = get_call_from_state(state)
if call is None:
return []
issues = [] # type: List[Issue]
if call.type != "DELEGATECALL":
return []
if state.environment.active_function_name != "fallback":
return []
state = call.state
address = state.get_current_instruction()["address"]
meminstart = get_variable(state.mstate.stack[-3])
if meminstart.type == VarType.CONCRETE:
issues += _concrete_call(call, state, address, meminstart)
return issues | 0.001449 |
def analyze(self,A):
"""
Analyzes structure of A.
Parameters
----------
A : matrix
For symmetric systems, should contain only lower diagonal part.
"""
A = coo_matrix(A)
self.mumps.set_shape(A.shape[0])
self.mumps.set_centralized_assembled_rows_cols(A.row+1,A.col+1)
self.mumps.run(job=1)
self.analyzed = True | 0.016317 |
def get_gene_goterms(self, gene, ancestors=False):
"""Return all GO terms a particular gene is annotated with.
Parameters
----------
gene: str
The gene symbol of the gene.
ancestors: bool, optional
If set to True, also return all ancestor GO terms.
Returns
-------
set of GOTerm objects
The set of GO terms the gene is annotated with.
Notes
-----
If a gene is annotated with a particular GO term, it can also be
considered annotated with all ancestors of that GO term.
"""
annotations = self.gene_annotations[gene]
terms = set(ann.term for ann in annotations)
if ancestors:
assert self._flattened
ancestor_terms = set()
for t in terms:
ancestor_terms.update(self.terms[id_] for id_ in t.ancestors)
terms |= ancestor_terms
return frozenset(terms) | 0.002026 |
def get_size(self, value=None):
"""Return the size in bytes.
Args:
value (bytes): In structs, the user can assign other value instead
of this class' instance. Here, in such cases, ``self`` is a
class attribute of the struct.
Returns:
int: The address size in bytes.
"""
if value is None:
value = self._value
if hasattr(value, 'get_size'):
return value.get_size()
return len(self.pack(value)) | 0.003759 |
def args_options():
""" Generates an arugment parser.
:returns:
Parser object
"""
parser = argparse.ArgumentParser(prog='landsat',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION))
subparsers = parser.add_subparsers(help='Landsat Utility',
dest='subs')
parser.add_argument('--version', action='version', version='%(prog)s version ' + __version__)
# Search Logic
parser_search = subparsers.add_parser('search',
help='Search Landsat metadata')
# Global search options
parser_search.add_argument('-l', '--limit', default=10, type=int,
help='Search return results limit\n'
'default is 10')
parser_search.add_argument('-s', '--start',
help='Start Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-e', '--end',
help='End Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('--latest', default=-1, type=int,
help='returns the N latest images within the last 365 days')
parser_search.add_argument('-c', '--cloud', type=float, default=100.0,
help='Maximum cloud percentage '
'default is 100 percent')
parser_search.add_argument('-p', '--pathrow',
help='Paths and Rows in order separated by comma. Use quotes ("001").'
'Example: path,row,path,row 001,001,190,204')
parser_search.add_argument('--lat', type=float, help='The latitude')
parser_search.add_argument('--lon', type=float, help='The longitude')
parser_search.add_argument('--address', type=str, help='The address')
parser_search.add_argument('--json', action='store_true', help='Returns a bare JSON response')
parser_search.add_argument('--geojson', action='store_true', help='Returns a geojson response')
parser_download = subparsers.add_parser('download',
help='Download images from Google Storage')
parser_download.add_argument('scenes',
metavar='sceneID',
nargs="+",
help="Provide Full sceneID, e.g. LC81660392014196LGN00")
parser_download.add_argument('-b', '--bands', help='If you specify bands, landsat-util will try to download '
'the band from S3. If the band does not exist, an error is returned', default=None)
parser_download.add_argument('-d', '--dest', help='Destination path')
parser_download.add_argument('-p', '--process', help='Process the image after download', action='store_true')
parser_download.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pansharpening requires larger memory')
parser_download.add_argument('--ndvi', action='store_true',
help='Whether to run the NDVI process. If used, bands parameter is disregarded')
parser_download.add_argument('--ndvigrey', action='store_true', help='Create an NDVI map in grayscale (grey)')
parser_download.add_argument('--clip', help='Clip the image with the bounding box provided. Values must be in ' +
'WGS84 datum, and with longitude and latitude units of decimal degrees ' +
'separated by comma.' +
'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,' +
'50.2682767372753')
parser_download.add_argument('-u', '--upload', action='store_true',
help='Upload to S3 after the image processing completed')
parser_download.add_argument('--username', help='USGS Eros account Username (only works if the account has' +
' special inventory access). Username and password as a fallback if the image ' +
'is not found on AWS S3 or Google Storage')
parser_download.add_argument('--password', help='USGS Eros account password, used as a fallback')
parser_download.add_argument('--key', help='Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as '
'Environment Variables)')
parser_download.add_argument('--secret', help='Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY '
'as Environment Variables)')
parser_download.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
parser_download.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
parser_download.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
parser_process = subparsers.add_parser('process', help='Process Landsat imagery')
parser_process.add_argument('path',
help='Path to the compressed image file')
parser_process.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pansharpening requires larger memory')
parser_process.add_argument('--ndvi', action='store_true', help='Create an NDVI map in color.')
parser_process.add_argument('--ndvigrey', action='store_true', help='Create an NDVI map in grayscale (grey)')
parser_process.add_argument('--clip', help='Clip the image with the bounding box provided. Values must be in ' +
'WGS84 datum, and with longitude and latitude units of decimal degrees ' +
'separated by comma.' +
'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,' +
'50.2682767372753')
parser_process.add_argument('-b', '--bands', help='specify band combinations. Default is 432. '
'Example: --bands 321', default='432')
parser_process.add_argument('-v', '--verbose', action='store_true',
help='Turn on verbosity')
parser_process.add_argument('-u', '--upload', action='store_true',
help='Upload to S3 after the image processing completed')
parser_process.add_argument('--key', help='Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as '
'Environment Variables)')
parser_process.add_argument('--secret', help='Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY '
'as Environment Variables)')
parser_process.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
parser_process.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
parser_process.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
return parser | 0.005229 |
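A hedged sketch of driving the parser above, assuming the module-level ``DESCRIPTION`` and ``__version__`` it references are defined; the coordinates are illustrative:

```python
# Parse a sample `search` invocation with the parser built above.
parser = args_options()
args = parser.parse_args(['search', '--lat', '59.3', '--lon', '18.1', '--cloud', '20'])
print(args.subs)                       # 'search'
print(args.lat, args.lon, args.cloud)  # 59.3 18.1 20.0
```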
def diff_a_thing(thing, opt):
"""Handle the diff action for a single thing. It may be a Vault backend
implementation or it may be a Vault data resource"""
changed = thing.diff()
if changed == ADD:
print("%s %s" % (maybe_colored("+", "green", opt), str(thing)))
elif changed == DEL:
print("%s %s" % (maybe_colored("-", "red", opt), str(thing)))
elif changed == CHANGED:
print("%s %s" % (maybe_colored("~", "yellow", opt), str(thing)))
elif changed == OVERWRITE:
print("%s %s" % (maybe_colored("+", "yellow", opt), str(thing)))
elif changed == CONFLICT:
print("%s %s" % (maybe_colored("!", "red", opt), str(thing)))
if changed != OVERWRITE and changed != NOOP:
maybe_details(thing, opt) | 0.0013 |
def to_dict(self):
"""Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict`
"""
result = super(Progress, self).to_dict()
label = LABELS['last_progress_change'][self.progress_type]
result[label] = to_iso8601_datetime(self.last_progress_change)
if self.progress_type == 'watched':
result['reset_at'] = self.reset_at
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
if self.hidden_seasons:
result['hidden_seasons'] = [
popitems(season.to_dict(), ['number', 'ids'])
for season in self.hidden_seasons.values()
]
if self.next_episode:
result['next_episode'] = popitems(self.next_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['next_episode']['season'] = self.next_episode.keys[0][0]
if self.last_episode:
result['last_episode'] = popitems(self.last_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['last_episode']['season'] = self.last_episode.keys[0][0]
return result | 0.003281 |
def ApplicationReceiving(self, app, streams):
"""Called when the list of streams with data ready to be read changes."""
# we should only proceed if we are in TCP mode
if stype != socket.SOCK_STREAM:
return
# handle all streams
for stream in streams:
# read object from the stream
obj = StreamRead(stream)
if obj:
if obj[0] == cmdData:
# data were received, reroute it to the tunnel based on the tunnel ID
try:
TCPTunnel.threads[obj[1]].send(obj[2])
except KeyError:
pass
elif obj[0] == cmdConnect:
# a connection request received, connect the socket
n = obj[1]
sock = socket.socket(type=stype)
try:
sock.connect(addr)
# start the tunnel thread
TCPTunnel(sock, stream, n).start()
except socket.error, e:
# connection failed, send an error report back through the stream
print 'error (%s): %s' % (n, e)
StreamWrite(stream, cmdError, n, tuple(e))
StreamWrite(stream, cmdDisconnect, n)
elif obj[0] == cmdDisconnect:
# a disconnection request received, close the tunnel
try:
TCPTunnel.threads[obj[1]].close()
except KeyError:
pass
elif obj[0] == cmdError:
# connection failed on the other side, display the error
print 'error (%s): %s' % obj[1:3] | 0.002747 |
def as_pyemu_matrix(self,typ=Matrix):
"""
Create a pyemu.Matrix from the Ensemble.
Parameters
----------
typ : pyemu.Matrix or derived type
the type of matrix to return
Returns
-------
pyemu.Matrix : pyemu.Matrix
"""
x = self.values.copy().astype(np.float)
return typ(x=x,row_names=list(self.index),
col_names=list(self.columns)) | 0.010823 |
def overlay(self, dimensions=None, **kwargs):
"""Group by supplied dimension(s) and overlay each group
Groups data by supplied dimension(s) overlaying the groups
along the dimension(s).
Args:
dimensions: Dimension(s) to group by
Returns:
NdOverlay object(s) with supplied dimensions
"""
if dimensions is None:
dimensions = self.kdims
else:
if not isinstance(dimensions, (list, tuple)):
dimensions = [dimensions]
dimensions = [self.get_dimension(d, strict=True)
for d in dimensions]
dims = [d for d in self.kdims if d not in dimensions]
return self.groupby(dims, group_type=NdOverlay) | 0.002558 |
def whisper(self, user, message):
"""
This seems super gimmicky, but so far this is the only
way I've seen to do this (at least through IRC). It's definitely
not documented.
"""
if user[0] == '#':
LOGGER.warning("Whisper is for users only.")
else:
super().message('#jtv', f".w {user} {message}") | 0.005249 |
def bm3_k(p, v0, k0, k0p):
"""
calculate bulk modulus, wrapper for cal_k_bm3
cannot handle uncertainties
:param p: pressure
:param v0: volume at reference conditions
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:return: bulk modulus at high pressure
"""
return cal_k_bm3(p, [v0, k0, k0p]) | 0.002475 |
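A hedged call sketch; the parameter values are illustrative only, and the units follow whatever convention ``cal_k_bm3`` uses (commonly GPa for pressures and moduli):

```python
# Illustrative, roughly MgO-like parameters; unit consistency is up to the caller.
k_at_10 = bm3_k(p=10.0, v0=74.7, k0=160.0, k0p=4.0)
print(k_at_10)
```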
def stop(self):
"""
Stops the `git push` thread and commits all streamed files (Git.store_file and Git.stream_file), followed
by a final git push.
You can not start the process again.
"""
self.active_thread = False
if self.thread_push_instance and self.thread_push_instance.isAlive():
self.thread_push_instance.join()
with self.batch_commit('STREAM_END'):
for path, handle in six.iteritems(self.streamed_files.copy()):
# open again and read full content
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
self.logger.debug('Git stream end for file: ' + full_path)
del self.streamed_files[path]
# make sure its written to the disk
try:
self.stream_files_lock.acquire()
if not handle.closed:
handle.flush()
handle.close()
finally:
self.stream_files_lock.release()
with open(full_path, 'r') as f:
self.commit_file(path, path, f.read())
if not self.keep_stream_files:
os.unlink(full_path)
with self.batch_commit('STORE_END'):
for path, bar in six.iteritems(self.store_files.copy()):
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
self.logger.debug('Git store end for file: ' + full_path)
del self.store_files[path]
try:
self.stream_files_lock.acquire()
self.commit_file(path, path, open(full_path, 'r').read())
finally:
self.stream_files_lock.release()
if not self.keep_stream_files:
os.unlink(full_path) | 0.003036 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'customizations') and self.customizations is not None:
_dict['customizations'] = [
x._to_dict() for x in self.customizations
]
return _dict | 0.006349 |
def ds2n(self):
"""Calculates the derivative of the neutron separation energies:
ds2n(Z,A) = s2n(Z,A) - s2n(Z,A+2)
"""
idx = [(x[0] + 0, x[1] + 2) for x in self.df.index]
values = self.s2n.values - self.s2n.loc[idx].values
return Table(df=pd.Series(values, index=self.df.index, name='ds2n' + '(' + self.name + ')')) | 0.008242 |
def wvcal_spectrum(sp, fxpeaks, poly_degree_wfit, wv_master,
wv_ini_search=None, wv_end_search=None,
wvmin_useful=None, wvmax_useful=None,
geometry=None, debugplot=0):
"""Execute wavelength calibration of a spectrum using fixed line peaks.
Parameters
----------
sp : 1d numpy array
Spectrum to be wavelength calibrated.
fxpeaks : 1d numpy array
Refined location of peaks in array index scale, i.e, from 0
to naxis1 - 1. The wavelength calibration is performed using
these line locations.
poly_degree_wfit : int
Degree for wavelength calibration polynomial.
wv_master : 1d numpy array
Array with arc line wavelengths.
wv_ini_search : float or None
Minimum expected wavelength in spectrum.
wv_end_search : float or None
Maximum expected wavelength in spectrum.
wvmin_useful : float or None
If not None, this value is used to clip detected lines below it.
wvmax_useful : float or None
If not None, this value is used to clip detected lines above it.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the Qt backend geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
solution_wv : instance of SolutionArcCalibration
Wavelength calibration solution.
"""
# check there are enough lines for fit
if len(fxpeaks) <= poly_degree_wfit:
print(">>> Warning: not enough lines to fit spectrum")
return None
# spectrum dimension
naxis1 = sp.shape[0]
wv_master_range = wv_master[-1] - wv_master[0]
delta_wv_master_range = 0.20 * wv_master_range
if wv_ini_search is None:
wv_ini_search = wv_master[0] - delta_wv_master_range
if wv_end_search is None:
wv_end_search = wv_master[-1] + delta_wv_master_range
# use channels (pixels from 1 to naxis1)
xchannel = fxpeaks + 1.0
# wavelength calibration
list_of_wvfeatures = arccalibration(
wv_master=wv_master,
xpos_arc=xchannel,
naxis1_arc=naxis1,
crpix1=1.0,
wv_ini_search=wv_ini_search,
wv_end_search=wv_end_search,
wvmin_useful=wvmin_useful,
wvmax_useful=wvmax_useful,
error_xpos_arc=3,
times_sigma_r=3.0,
frac_triplets_for_sum=0.50,
times_sigma_theil_sen=10.0,
poly_degree_wfit=poly_degree_wfit,
times_sigma_polfilt=10.0,
times_sigma_cook=10.0,
times_sigma_inclusion=10.0,
geometry=geometry,
debugplot=debugplot
)
title = "Wavelength calibration"
solution_wv = fit_list_of_wvfeatures(
list_of_wvfeatures=list_of_wvfeatures,
naxis1_arc=naxis1,
crpix1=1.0,
poly_degree_wfit=poly_degree_wfit,
weighted=False,
plot_title=title,
geometry=geometry,
debugplot=debugplot
)
if abs(debugplot) % 10 != 0:
# final plot with identified lines
xplot = np.arange(1, naxis1 + 1, dtype=float)
ax = ximplotxy(xplot, sp, title=title, show=False,
xlabel='pixel (from 1 to NAXIS1)',
ylabel='number of counts',
geometry=geometry)
ymin = sp.min()
ymax = sp.max()
dy = ymax-ymin
ymin -= dy/20.
ymax += dy/20.
ax.set_ylim([ymin, ymax])
# plot wavelength of each identified line
for feature in solution_wv.features:
xpos = feature.xpos
reference = feature.reference
ax.text(xpos, sp[int(xpos+0.5)-1],
str(reference), fontsize=8,
horizontalalignment='center')
# show plot
print('Plot with identified lines')
pause_debugplot(12, pltshow=True)
# return the wavelength calibration solution
return solution_wv | 0.000244 |
def scalar_stats(data, functions=('min', 'max', 'mean', 'std')):
'''Calculate the stats from the given numpy functions
Parameters:
data: array of data points to be used for the stats
Options:
functions: tuple of numpy stat functions to apply on data
Returns:
Dictionary with the name of the function as key and the result
as the respective value
'''
stats = {}
for func in functions:
stats[func] = getattr(np, func)(data)
return stats | 0.001957 |
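A quick usage example:

```python
import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])
print(scalar_stats(data))
# {'min': 1.0, 'max': 4.0, 'mean': 2.5, 'std': 1.118...}
print(scalar_stats(data, functions=('median', 'sum')))
# {'median': 2.5, 'sum': 10.0}
```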
def setup(self, port):
"""Connects to an Arduino UNO on serial port `port`.
@throw RuntimeError can't connect to Arduino
"""
port = str(port)
# timeout is used by all I/O operations
self._serial = serial.Serial(port, 115200, timeout=2)
time.sleep(2) # time to Arduino reset
if not self._serial.is_open:
raise RuntimeError('Could not connect to Arduino')
self._serial.write(b'\x01')
if self._serial.read() != b'\x06':
raise RuntimeError('Could not connect to Arduino')
ps = [p for p in self.available_pins() if p['digital']['output']]
for pin in ps:
self._set_pin_direction(pin['id'], ahio.Direction.Output) | 0.002688 |
def set_dashboard_tags(self, id, **kwargs): # noqa: E501
"""Set all tags associated with a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_dashboard_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_dashboard_tags_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.set_dashboard_tags_with_http_info(id, **kwargs) # noqa: E501
return data | 0.002144 |
def __get_empty_config(self):
"""
Returns the config file contents as a string. The config file is generated and then deleted.
"""
self._generate_config()
path = self._get_config_path()
with open(path, 'r') as readable:
contents = readable.read()
os.remove(path)
return contents | 0.008475 |
def get(self, tags):
"""Find an adequate value for this field from a dict of tags."""
# Try to find our name
value = tags.get(self.name, '')
for name in self.alternate_tags:
# Iterate of alternates until a non-empty value is found
value = value or tags.get(name, '')
# If we still have nothing, return our default
value = value or self.default
return value | 0.004566 |
def load_descendant_articles_for_section(
context, section, featured_in_homepage=None, featured_in_section=None,
featured_in_latest=None, count=5):
"""
Returns all descendant articles (filtered using the parameters)
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles.
"""
request = context.get('request')
locale = context.get('locale_code')
page = section.get_main_language_page()
settings = SiteSettings.for_site(request.site) \
if request else None
qs = ArticlePage.objects.descendant_of(page).filter(
language__is_main_language=True)
article_ordering = settings \
and settings.article_ordering_within_section
cms_ordering = article_ordering \
and settings.article_ordering_within_section !=\
ArticleOrderingChoices.CMS_DEFAULT_SORTING
if article_ordering and cms_ordering:
order_by = ArticleOrderingChoices.\
get(settings.article_ordering_within_section).name.lower()
order_by = order_by if order_by.find('_desc') == -1 \
else '-{}'.format(order_by.replace('_desc', ''))
qs = qs.order_by(order_by)
if featured_in_homepage is not None:
qs = qs.filter(featured_in_homepage=featured_in_homepage)\
.order_by('-featured_in_homepage_start_date')
if featured_in_latest is not None:
qs = qs.filter(featured_in_latest=featured_in_latest)
if featured_in_section is not None:
qs = qs.filter(featured_in_section=featured_in_section)\
.order_by('-featured_in_section_start_date')
if not locale:
return qs.live()[:count]
return get_pages(context, qs, locale)[:count] | 0.000568 |
def get(self, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(**kwargs)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise Task.DoesNotExist(
'Task matching query does not exist. '
'Lookup parameters were {0}'.format(kwargs),
)
raise ValueError(
'get() returned more than one Task -- it returned {0}! '
'Lookup parameters were {1}'.format(num, kwargs),
) | 0.00318 |
def run(config, clear_opt=False):
"""Find an image and download it."""
flickr = flickrapi.FlickrAPI(config.get('walls', 'api_key'),
config.get('walls', 'api_secret'))
width = config.getint('walls', 'width')
height = config.getint('walls', 'height')
# Clear out the destination dir
if clear_opt:
clear_dir(os.path.expanduser(config.get('walls', 'image_dir')))
# Find an image
tags = config.get('walls', 'tags')
for photo in flickr.walk(tags=tags, format='etree'):
try:
photo_url = smallest_url(flickr, photo.get('id'), width, height)
if photo_url:
break
except (KeyError, ValueError, TypeError):
stderr_and_exit('Unexpected data from Flickr.\n')
else:
stderr_and_exit('No matching photos found.\n')
# Download the image
dest = os.path.expanduser(config.get('walls', 'image_dir'))
try:
download(photo_url, dest)
except IOError:
stderr_and_exit('Error downloading image.\n') | 0.000941 |
def disapproveworker(ctx, workers, account):
""" Disapprove worker(es)
"""
print_tx(ctx.bitshares.disapproveworker(workers, account=account)) | 0.006536 |
def gotoPrevious(self):
"""
Goes to the previous panel tab.
"""
index = self._currentPanel.currentIndex() - 1
if index < 0:
index = self._currentPanel.count() - 1
self._currentPanel.setCurrentIndex(index) | 0.010989 |
def _service_by_name(name):
'''
Return the service info for a service by label, filename or path
'''
services = _available_services()
name = name.lower()
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['filename'])
if basename.lower() == name:
# Match on basename
return service
return False | 0.001709 |
def is_toml_file(filename, show_warnings = False):
"""Check configuration file type is TOML
Return a boolean indicating wheather the file is TOML format or not
"""
if is_yaml_file(filename):
return(False)
try:
config_dict = load_config(filename, file_type = "toml")
is_toml = True
except:
is_toml = False
return(is_toml) | 0.015789 |
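A hedged usage sketch with a hypothetical file name; note that the bare ``except`` means any failure inside ``load_config`` (including a missing file) is reported as "not TOML":

```python
# Hypothetical file name; any parse/load failure simply yields False.
if is_toml_file('settings.toml'):
    print('settings.toml parses as TOML')
else:
    print('settings.toml is not valid TOML (or could not be loaded)')
```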
def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,
# but numpy-1.12.0 introduced the function `~numpy.geomspace`
logfmin = numpy.log10(self.plane.frange[0])
logfmax = numpy.log10(self.plane.frange[1])
outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new | 0.00066 |
def get_arctic_version(self, symbol, as_of=None):
"""
Return the numerical representation of the arctic version used to write the last (or as_of) version for
the given symbol.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
arctic_version : int
The numerical representation of Arctic version, used to create the specified symbol version
"""
return self._read_metadata(symbol, as_of=as_of).get('arctic_version', 0) | 0.006803 |
def start_pipeline(self, args=None, multi=False):
"""
Initialize the pipeline run: tee output, print some diagnostics, and create temp files.
You provide only the output directory (used for pipeline stats, log, and status flag files).
"""
# Perhaps this could all just be put into __init__, but I just kind of like the idea of a start function
self.make_sure_path_exists(self.outfolder)
# By default, Pypiper will mirror every operation so it is displayed both
# on sys.stdout **and** to a log file. Unfortunately, interactive python sessions
# ruin this by interfering with stdout. So, for interactive mode, we do not enable
# the tee subprocess, sending all output to screen only.
# Starting multiple PipelineManagers in the same script has the same problem, and
# must therefore be run in interactive_mode.
interactive_mode = multi or not hasattr(__main__, "__file__")
if interactive_mode:
print("Warning: You're running an interactive python session. "
"This works, but pypiper cannot tee the output, so results "
"are only logged to screen.")
else:
sys.stdout = Unbuffered(sys.stdout)
# sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # Unbuffer output
# The tee subprocess must be instructed to ignore TERM and INT signals;
# Instead, I will clean up this process in the signal handler functions.
# This is required because otherwise, if pypiper receives a TERM or INT,
# the tee will be automatically terminated by python before I have a chance to
# print some final output (for example, about when the process stopped),
# and so those things don't end up in the log files because the tee
# subprocess is dead. Instead, I will handle the killing of the tee process
# manually (in the exit handler).
# a for append to file
tee = subprocess.Popen(
["tee", "-a", self.pipeline_log_file], stdin=subprocess.PIPE,
preexec_fn=self._ignore_interrupts)
# If the pipeline is terminated with SIGTERM/SIGINT,
# make sure we kill this spawned tee subprocess as well.
# atexit.register(self._kill_child_process, tee.pid, proc_name="tee")
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
self.tee = tee
# For some reason, this exit handler function MUST be registered after
# the one that kills the tee process.
atexit.register(self._exit_handler)
        # A future possibility to avoid this tee is to use a Tee class; that works for anything printed here
        # by pypiper, but it can't tee the subprocess output. Doing that would require using threading to
        # simultaneously capture and display subprocess output. I shelve this for now and stick with the tee option.
# sys.stdout = Tee(self.pipeline_log_file)
# Record the git version of the pipeline and pypiper used. This gets (if it is in a git repo):
# dir: the directory where the code is stored
# hash: the commit id of the last commit in this repo
# date: the date of the last commit in this repo
# diff: a summary of any differences in the current (run) version vs. the committed version
# Wrapped in try blocks so that the code will not fail if the pipeline or pypiper are not git repositories
gitvars = {}
try:
# pypiper dir
ppd = os.path.dirname(os.path.realpath(__file__))
gitvars['pypiper_dir'] = ppd
gitvars['pypiper_hash'] = subprocess.check_output("cd " + ppd + "; git rev-parse --verify HEAD 2>/dev/null", shell=True)
gitvars['pypiper_date'] = subprocess.check_output("cd " + ppd + "; git show -s --format=%ai HEAD 2>/dev/null", shell=True)
gitvars['pypiper_diff'] = subprocess.check_output("cd " + ppd + "; git diff --shortstat HEAD 2>/dev/null", shell=True)
gitvars['pypiper_branch'] = subprocess.check_output("cd " + ppd + "; git branch | grep '*' 2>/dev/null", shell=True)
except Exception:
pass
try:
# pipeline dir
pld = os.path.dirname(os.path.realpath(sys.argv[0]))
gitvars['pipe_dir'] = pld
gitvars['pipe_hash'] = subprocess.check_output("cd " + pld + "; git rev-parse --verify HEAD 2>/dev/null", shell=True)
gitvars['pipe_date'] = subprocess.check_output("cd " + pld + "; git show -s --format=%ai HEAD 2>/dev/null", shell=True)
gitvars['pipe_diff'] = subprocess.check_output("cd " + pld + "; git diff --shortstat HEAD 2>/dev/null", shell=True)
gitvars['pipe_branch'] = subprocess.check_output("cd " + pld + "; git branch | grep '*' 2>/dev/null", shell=True)
except Exception:
pass
# Print out a header section in the pipeline log:
# Wrap things in backticks to prevent markdown from interpreting underscores as emphasis.
# print("----------------------------------------")
print("### [Pipeline run code and environment:]\n")
print("* " + "Command".rjust(20) + ": " + "`" + str(" ".join(sys.argv)) + "`")
print("* " + "Compute host".rjust(20) + ": " + platform.node())
print("* " + "Working dir".rjust(20) + ": " + os.getcwd())
print("* " + "Outfolder".rjust(20) + ": " + self.outfolder)
self.timestamp("* " + "Pipeline started at".rjust(20) + ": ")
print("\n### [Version log:]\n")
print("* " + "Python version".rjust(20) + ": " + platform.python_version())
try:
print("* " + "Pypiper dir".rjust(20) + ": " + "`" + gitvars['pypiper_dir'].strip() + "`")
print("* " + "Pypiper version".rjust(20) + ": " + __version__)
print("* " + "Pypiper hash".rjust(20) + ": " + str(gitvars['pypiper_hash']).strip())
print("* " + "Pypiper branch".rjust(20) + ": " + str(gitvars['pypiper_branch']).strip())
print("* " + "Pypiper date".rjust(20) + ": " + str(gitvars['pypiper_date']).strip())
if "" != str(gitvars['pypiper_diff']):
print("* " + "Pypiper diff".rjust(20) + ": " + str(gitvars['pypiper_diff']).strip())
except KeyError:
# It is ok if keys aren't set, it means pypiper isn't in a git repo.
pass
try:
print("* " + "Pipeline dir".rjust(20) + ": " + "`" + gitvars['pipe_dir'].strip() + "`")
print("* " + "Pipeline version".rjust(20) + ": " + str(self.pl_version))
print("* " + "Pipeline hash".rjust(20) + ": " + str(gitvars['pipe_hash']).strip())
print("* " + "Pipeline branch".rjust(20) + ": " + str(gitvars['pipe_branch']).strip())
print("* " + "Pipeline date".rjust(20) + ": " + str(gitvars['pipe_date']).strip())
if (gitvars['pipe_diff'] != ""):
print("* " + "Pipeline diff".rjust(20) + ": " + str(gitvars['pipe_diff']).strip())
except KeyError:
# It is ok if keys aren't set, it means the pipeline isn't a git repo.
pass
# Print all arguments (if any)
print("\n### [Arguments passed to pipeline:]\n")
for arg, val in (vars(args) if args else dict()).items():
argtext = "`{}`".format(arg)
valtext = "`{}`".format(val)
print("* {}: {}".format(argtext.rjust(20), valtext))
print("\n----------------------------------------\n")
self._set_status_flag(RUN_FLAG)
# Record the start in PIPE_profile and PIPE_commands output files so we
# can trace which run they belong to
with open(self.pipeline_commands_file, "a") as myfile:
myfile.write("# Pipeline started at " + time.strftime("%m-%d %H:%M:%S", time.localtime(self.starttime)) + "\n\n")
with open(self.pipeline_profile_file, "a") as myfile:
myfile.write("# Pipeline started at " + time.strftime("%m-%d %H:%M:%S", time.localtime(self.starttime)) + "\n\n") | 0.006264 |
def _get_ppa_info_from_launchpad(owner_name, ppa_name):
'''
Idea from softwareproperties.ppa.
Uses urllib2 which sacrifices server cert verification.
This is used as fall-back code or for secure PPAs
:param owner_name:
:param ppa_name:
:return:
'''
lp_url = 'https://launchpad.net/api/1.0/~{0}/+archive/{1}'.format(
owner_name, ppa_name)
request = _Request(lp_url, headers={'Accept': 'application/json'})
lp_page = _urlopen(request)
return salt.utils.json.load(lp_page) | 0.001894 |
def _convert(reddit_session, data):
"""Return a Redditor object from the data."""
retval = Redditor(reddit_session, data['name'], fetch=False)
retval.id = data['id'].split('_')[1] # pylint: disable=C0103,W0201
return retval | 0.007813 |
def example_transform(v, row, row_n, i_s, i_d, header_s, header_d,scratch, errors, accumulator):
""" An example column transform.
    This is an example of a column transform with all of the arguments listed. A real transform
    can omit any (or all) of these, and can supply them in any order; the calling code will inspect the
signature.
When the function is listed as a transform for a column, it is called for every row of data.
:param v: The current value of the column
    :param row: A RowProxy object for the whole row.
:param row_n: The current row number.
:param i_s: The numeric index of the source column
:param i_d: The numeric index for the destination column
:param header_s: The name of the source column
:param header_d: The name of the destination column
:param scratch: A dict that can be used for storing any values. Persists between rows.
:param errors: A dict used to store error messages. Persists for all columns in a row, but not between rows.
:param accumulator: A dict for use in accumulating values, such as computing aggregates.
:return: The final value to be supplied for the column.
"""
return str(v)+'-foo' | 0.007457 |
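The docstring says the calling code inspects the transform's signature and supplies only the arguments it asks for. A minimal sketch of how such signature-based dispatch could work (not the actual calling code from this codebase; the helper and transform names are made up):

import inspect

def call_transform(func, available):
    """Call func with only the keyword arguments its signature requests."""
    wanted = inspect.signature(func).parameters
    return func(**{name: available[name] for name in wanted if name in available})

def upper_first3(v, row_n):  # a transform that only asks for v and row_n
    return "{}:{}".format(row_n, str(v)[:3].upper())

ctx = {'v': 'example', 'row_n': 7, 'header_s': 'col_a', 'errors': {}}
print(call_transform(upper_first3, ctx))  # -> '7:EXA'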
def calculate_distribution(network_agents=None,
agent_type=None):
'''
Calculate the threshold values (thresholds for a uniform distribution)
of an agent distribution given the weights of each agent type.
The input has this form: ::
[
{'agent_type': 'agent_type_1',
'weight': 0.2,
'state': {
'id': 0
}
},
{'agent_type': 'agent_type_2',
'weight': 0.8,
'state': {
'id': 1
}
}
]
In this example, 20% of the nodes will be marked as type
'agent_type_1'.
'''
if network_agents:
network_agents = deepcopy(network_agents)
elif agent_type:
network_agents = [{'agent_type': agent_type}]
else:
raise ValueError('Specify a distribution or a default agent type')
# Calculate the thresholds
total = sum(x.get('weight', 1) for x in network_agents)
acc = 0
for v in network_agents:
if 'ids' in v:
v['threshold'] = STATIC_THRESHOLD
continue
upper = acc + (v.get('weight', 1)/total)
v['threshold'] = [acc, upper]
acc = upper
return network_agents | 0.000766 |
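A quick illustration of how the computed [acc, upper] thresholds partition the unit interval, and how a uniform draw then selects an agent type. This is a standalone sketch that mirrors the weighting logic above, not the library itself:

import random

distribution = [('agent_type_1', 0.2), ('agent_type_2', 0.8)]

# Build cumulative [lower, upper) thresholds, as calculate_distribution does.
total = sum(weight for _, weight in distribution)
thresholds, acc = [], 0.0
for name, weight in distribution:
    upper = acc + weight / total
    thresholds.append((name, acc, upper))
    acc = upper

def pick_agent_type():
    r = random.random()
    for name, lower, upper in thresholds:
        if lower <= r < upper:
            return name
    return thresholds[-1][0]  # guard against floating-point edge cases

counts = {name: 0 for name, _ in distribution}
for _ in range(10000):
    counts[pick_agent_type()] += 1
print(counts)  # roughly {'agent_type_1': 2000, 'agent_type_2': 8000}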
def cee_map_priority_table_map_cos6_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos6_pgid = ET.SubElement(priority_table, "map-cos6-pgid")
map_cos6_pgid.text = kwargs.pop('map_cos6_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004894 |
def get_role_arn(role_name, env, region):
"""Get role ARN given role name.
Args:
role_name (str): Role name to lookup
env (str): Environment in which to lookup
region (str): Region
Returns:
ARN if role found
"""
session = boto3.Session(profile_name=env, region_name=region)
iam_client = session.client('iam')
LOG.debug('Searching for %s.', role_name)
role = iam_client.get_role(RoleName=role_name)
role_arn = role['Role']['Arn']
LOG.debug("Found role's %s ARN %s", role_name, role_arn)
return role_arn | 0.001712 |
def get_cover(song, size=250):
"""Download the cover art."""
try:
data = mus.search_releases(artist=song["artist"],
release=song["album"],
limit=1)
release_id = data["release-list"][0]["release-group"]["id"]
print(f"album: Using release-id: {data['release-list'][0]['id']}")
return mus.get_release_group_image_front(release_id, size=size)
    except mus.NetworkError:
        return get_cover(song, size)
except mus.ResponseError:
print("error: Couldn't find album art for",
f"{song['artist']} - {song['album']}") | 0.00155 |
def add_variable(self, name, expression, overwrite=True, unique=True):
"""Add a variable to to a DataFrame.
A variable may refer to other variables, and virtual columns and expression may refer to variables.
Example
>>> df.add_variable('center', 0)
>>> df.add_virtual_column('x_prime', 'x-center')
>>> df.select('x_prime < 0')
        :param str name: name of the virtual variable
        :param expression: expression for the variable
"""
if unique or overwrite or name not in self.variables:
existing_names = self.get_column_names(virtual=False) + list(self.variables.keys())
name = vaex.utils.find_valid_name(name, used=[] if not unique else existing_names)
self.variables[name] = expression
self.signal_variable_changed.emit(self, name, "add")
if unique:
return name | 0.005488 |
def configure_deletefor(self, ns, definition):
"""
Register a delete-for relation endpoint.
The definition's func should be a delete function, which must:
- accept kwargs for path data
- return truthy/falsey
:param ns: the namespace
:param definition: the endpoint definition
"""
@self.add_route(ns.relation_path, Operation.DeleteFor, ns)
@wraps(definition.func)
def delete(**path_data):
headers = dict()
response_data = dict()
require_response_data(definition.func(**path_data))
definition.header_func(headers, response_data)
response_format = self.negotiate_response_content(definition.response_formats)
return dump_response_data(
"",
None,
status_code=Operation.DeleteFor.value.default_code,
headers=headers,
response_format=response_format,
)
delete.__doc__ = "Delete a {} relative to a {}".format(pluralize(ns.object_name), ns.subject_name) | 0.003607 |
def delete_orderrun(backend, orderrun_id):
"""
Delete the orderrun specified by the argument.
"""
click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green')
check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip())) | 0.010345 |
def checkplot_infokey_worker(task):
'''This gets the required keys from the requested file.
Parameters
----------
task : tuple
Task is a two element tuple::
- task[0] is the dict to work on
        - task[1] is a list of lists of str giving all the key addresses at which
          to extract items from the dict
Returns
-------
list
This is a list of all of the items at the requested key addresses.
'''
cpf, keys = task
cpd = _read_checkplot_picklefile(cpf)
resultkeys = []
for k in keys:
try:
resultkeys.append(_dict_get(cpd, k))
except Exception as e:
resultkeys.append(np.nan)
return resultkeys | 0.001381 |
def transform(self, X):
"""Transform your data to zero mean unit variance."""
if not self.is_fit:
raise ValueError("The scaler has not been fit yet.")
return (X-self.mean) / (self.std + 10e-7) | 0.008772 |
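For context, a complete minimal version of such a scaler, including the fit step that the snippet above presumes. This is a generic sketch, not the library's actual class:

import numpy as np

class SimpleScaler:
    def __init__(self):
        self.is_fit = False

    def fit(self, X):
        # Column-wise mean and standard deviation.
        self.mean = np.mean(X, axis=0)
        self.std = np.std(X, axis=0)
        self.is_fit = True
        return self

    def transform(self, X):
        if not self.is_fit:
            raise ValueError("The scaler has not been fit yet.")
        # The small epsilon guards against division by zero for constant columns.
        return (X - self.mean) / (self.std + 1e-6)

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
print(SimpleScaler().fit(X).transform(X))  # roughly zero mean, unit variance per column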
def __generate_study_name(self):
"""
When a study name is not given, generate one with the format of " author - site name - year "
:return str study_name: generated study name
"""
study_name = ""
_exist = False
try:
if self.noaa_data_sorted["Top"]["Study_Name"]:
_exist = True
except KeyError:
pass
if not _exist:
try:
_site = self.noaa_data_sorted["Site_Information"]["properties"]["siteName"]
_year = self.noaa_data_sorted["Publication"][0]["pubYear"]
_author = self.noaa_data_sorted["Publication"][0]["author"]
_author = self.__get_author_last_name(_author)
study_name = "{}.{}.{}".format(_author, _site, _year)
study_name = study_name.replace(" ", "_").replace(",", "_")
except (KeyError, Exception):
pass
self.noaa_data_sorted["Top"]["Study_Name"] = study_name
self.noaa_data_sorted["Title"]["Study_Name"] = study_name
return | 0.003597 |
def dwrap(kx,nc):
'''compute a wrapped distance'''
q1 = np.mod(kx, nc)
q2 = np.minimum(q1, nc-q1)
return q2 | 0.01626 |
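A worked example of the wrapped (periodic) distance above: on a grid of nc = 10 points, indices 7 and 13 are both 3 grid points away from 0 once wrapping is taken into account (same numpy-based definition, repeated so the example runs on its own):

import numpy as np

def dwrap(kx, nc):
    q1 = np.mod(kx, nc)
    return np.minimum(q1, nc - q1)

print(dwrap(np.array([0, 3, 7, 13, -2]), 10))  # -> [0 3 3 3 2]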
def _attachToObject(self, anchorObj, relationName) :
"dummy fct for compatibility reasons, a RabaListPupa is attached by default"
#MutableSequence.__getattribute__(self, "develop")()
self.develop()
self._attachToObject(anchorObj, relationName) | 0.031873 |
def install_caller_instruction(self, token_type="Unrestricted",
transaction_id=None):
"""
Set us up as a caller
This will install a new caller_token into the FPS section.
This should really only be called to regenerate the caller token.
"""
response = self.install_payment_instruction("MyRole=='Caller';",
token_type=token_type,
transaction_id=transaction_id)
body = response.read()
if(response.status == 200):
rs = ResultSet()
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
caller_token = rs.TokenId
try:
boto.config.save_system_option("FPS", "caller_token",
caller_token)
except(IOError):
boto.config.save_user_option("FPS", "caller_token",
caller_token)
return caller_token
else:
raise FPSResponseError(response.status, response.reason, body) | 0.003359 |
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidListView, self).init_widget()
d = self.declaration
self.set_arrangement(d.arrangement) | 0.009756 |
def get_encrypted_pin(self, clear_pin, card_number):
"""
Get PIN block in ISO 0 format, encrypted with the terminal key
"""
if not self.terminal_key:
print('Terminal key is not set')
return ''
if self.pinblock_format == '01':
try:
pinblock = bytes.fromhex(get_pinblock(clear_pin, card_number))
#print('PIN block: {}'.format(raw2str(pinblock)))
except TypeError:
return ''
encrypted_pinblock = self.tpk_cipher.encrypt(pinblock)
return raw2str(encrypted_pinblock)
else:
print('Unsupported PIN Block format')
return '' | 0.004225 |
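The get_pinblock helper referenced above is not shown here. For reference, an ISO 9564 format 0 ("ISO-0") clear PIN block is conventionally built by XOR-ing a padded PIN field with a PAN field; a hedged sketch of that construction follows (the actual helper may differ in details, and the PAN value is a made-up illustrative number of at least 13 digits):

def iso0_pinblock(pin, pan):
    """Clear ISO-0 (format 0) PIN block as a 16-hex-digit string (sketch only)."""
    # Field 1: '0' + PIN length nibble + PIN digits, right-padded with 'F' to 16 nibbles.
    pin_field = '0{:1X}{}'.format(len(pin), pin).ljust(16, 'F')
    # Field 2: '0000' + rightmost 12 PAN digits excluding the check digit.
    pan_field = '0000' + pan[-13:-1]
    return '{:016X}'.format(int(pin_field, 16) ^ int(pan_field, 16))

print(iso0_pinblock('1234', '4000001234562000'))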
async def revoke_credential(self):
"""
Revokes a credential.
:return: None
Example:
credential.revoke_credential()
"""
if not hasattr(IssuerCredential.revoke_credential, "cb"):
self.logger.debug("vcx_issuer_revoke_credential: Creating callback")
IssuerCredential.revoke_credential.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
await do_call('vcx_issuer_revoke_credential',
c_credential_handle,
IssuerCredential.revoke_credential.cb) | 0.006329 |
def add(self, index):
"""Calculate time elapsed from the point previously called
this method or this object is created to this is called.
Args:
index (int): Index to be displayed, and be used to take intervals.
"""
if (index - self.flush_at) < self.interval:
return
now = time.time()
elapsed = now - self.lap
elapsed_total = now - self.start
it = index - self.flush_at
self.lap = now
if self.verbose:
logger.info("iter={} {{{}}}={}[sec/{}iter] {}[sec]".format(
index, self.name, elapsed, it, elapsed_total))
if self.fd is not None:
print("{} {} {} {}".format(index, elapsed,
it, elapsed_total), file=self.fd)
self.flush_at = index | 0.002384 |
def from_dataframe(df,
source_col='source',
target_col='target',
interaction_col='interaction',
name='From DataFrame',
edge_attr_cols=[]):
"""
Utility to convert Pandas DataFrame object into Cytoscape.js JSON
:param df: Dataframe to convert.
:param source_col: Name of source column.
:param target_col: Name of target column.
:param interaction_col: Name of interaction column.
:param name: Name of network.
:param edge_attr_cols: List containing other columns to consider in df as
edges' attributes.
:return: Dictionary version of df.
"""
network = cyjs.get_empty_network(name=name)
nodes = set()
if edge_attr_cols is None:
edge_attr_cols = []
for index, row in df.iterrows():
s = row[source_col]
t = row[target_col]
if s not in nodes:
nodes.add(s)
source = get_node(s)
network['elements']['nodes'].append(source)
if t not in nodes:
nodes.add(t)
target = get_node(t)
network['elements']['nodes'].append(target)
extra_values = {column: row[column]
for column in edge_attr_cols
if column in df.columns}
network['elements']['edges'].append(
get_edge(s, t, interaction=row[interaction_col], **extra_values)
)
return network | 0.000674 |
def set_event_tags(self, id, **kwargs): # noqa: E501
"""Set all tags associated with a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_event_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_event_tags_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.set_event_tags_with_http_info(id, **kwargs) # noqa: E501
return data | 0.002191 |
def main(argv):
"""
Main mon
:param argv: console arguments
:return:
"""
input_file = ""
output_file = ""
monitor = None
formula = None
trace = None
iformula = None
itrace = None
isys = None
online = False
fuzzer = False
l2m = False
debug = False
rounds = 1
server_port = 8080
webservice = False
help_str_extended = "fodtlmon V 0.1 .\n" + \
"For more information see fodtlmon home page\n Usage : mon.py [OPTIONS] formula trace" + \
"\n -h \t--help " + "\t display this help and exit" + \
"\n -i \t--input= [file] " + "\t the input file" + \
"\n -o \t--output= [path]" + "\t the output file" + \
"\n -f \t--formula " + "\t the formula" + \
"\n \t--iformula " + "\t path to file that contains the formula" + \
"\n -t \t--trace " + "\t the trace" + \
"\n \t--itrace " + "\t path to file that contains the trace" + \
"\n -1 \t--ltl " + "\t use LTL monitor" + \
"\n \t--l2m " + "\t call ltl2mon also" + \
"\n -2 \t--fotl " + "\t use FOTL monitor" + \
"\n -3 \t--dtl " + "\t use DTL monitor" + \
"\n -4 \t--fodtl " + "\t use FODTL monitor" + \
"\n \t--sys= [file] " + "\t Run a system from json file" + \
"\n \t--rounds= int " + "\t Number of rounds to run in the system" + \
"\n -z \t--fuzzer " + "\t run fuzzing tester" + \
"\n -d \t--debug " + "\t enable debug mode" + \
"\n \t--server " + "\t start web service" + \
"\n \t--port= int " + "\t server port number" + \
"\n\nReport fodtlmon bugs to [email protected]" + \
"\nfodtlmon home page: <https://github.com/hkff/fodtlmon>" + \
"\nfodtlmon is a free software released under GPL 3"
# Checking options
try:
opts, args = getopt.getopt(argv[1:], "hi:o:f:t:1234zd",
["help", "input=", "output=", "trace=", "formula=" "ltl", "fotl", "dtl",
"fodtl", "sys=", "fuzzer", "itrace=", "iformula=", "rounds=", "l2m", "debug",
"server", "port="])
except getopt.GetoptError:
print(help_str_extended)
sys.exit(2)
if len(opts) == 0:
print(help_str_extended)
# Handling options
for opt, arg in opts:
if opt in ("-h", "--help"):
print(help_str_extended)
sys.exit()
elif opt in ("-i", "--input"):
input_file = arg
elif opt in ("-o", "--output"):
output_file = arg
elif opt in ("-1", "--ltl"):
monitor = Ltlmon
elif opt in ("-2", "--fotl"):
monitor = Fotlmon
elif opt in ("-3", "--dtl"):
monitor = Dtlmon
elif opt in ("-4", "--fodtl"):
monitor = Fodtlmon
elif opt in ("-f", "--formula"):
formula = arg
elif opt in ("-t", "--trace"):
trace = arg
elif opt in "--sys":
isys = arg
elif opt in "--rounds":
rounds = int(arg)
elif opt in ("-z", "--fuzzer"):
fuzzer = True
elif opt in "--iformula":
iformula = arg
elif opt in "--itrace":
itrace = arg
elif opt in "--l2m":
l2m = True
elif opt in ("-d", "--debug"):
debug = True
elif opt in "--server":
webservice = True
elif opt in "--port":
server_port = int(arg)
if webservice:
Webservice.start(server_port)
return
if fuzzer:
if monitor is Ltlmon:
run_ltl_tests(monitor="ltl", alphabet=["P"], constants=["a", "b", "c"], trace_lenght=10000, formula_depth=5,
formula_nbr=10000, debug=debug)
elif monitor is Dtlmon:
run_dtl_tests()
return
if itrace is not None:
with open(itrace, "r") as f:
trace = f.read()
if iformula is not None:
with open(iformula, "r") as f:
formula = f.read()
if isys is not None:
with open(isys, "r") as f:
js = f.read()
s = System.parseJSON(js)
for x in range(rounds):
s.run()
return
# print(argv)
if None not in (monitor, trace, formula):
tr = Trace().parse(trace)
fl = eval(formula[1:]) if formula.startswith(":") else FodtlParser.parse(formula)
mon = monitor(fl, tr)
res = mon.monitor(debug=debug)
print("")
print("Trace : %s" % tr)
print("Formula : %s" % fl)
print("Code : %s" % fl.toCODE())
print("PPrint : %s" % fl.prefix_print())
print("TSPASS : %s" % fl.toTSPASS())
print("LTLFO : %s" % fl.toLTLFO())
print("Result : %s" % res)
if l2m:
print(fl.toLTLFO())
res = ltlfo2mon(fl.toLTLFO(), tr.toLTLFO())
print("ltl2mon : %s" % res) | 0.003415 |
def put(self, item, block=True, timeout=None, chill=True):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise compat.queue.Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time.time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time.time()
if remaining <= 0.0:
raise compat.queue.Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
if (
not chill
or self._qsize() > self._chill_until
or (time.time() - self._last_unchill) > self._max_chill_time
):
self.not_empty.notify()
self._last_unchill = time.time() | 0.001153 |
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals()) | 0.001838 |
def set_provenance(self, provenance_id):
"""stub"""
if not self.my_osid_object_form._is_valid_string(
provenance_id, self.get_provenance_metadata()):
raise InvalidArgument('provenanceId')
self.my_osid_object_form._my_map['provenanceId'] = provenance_id | 0.006579 |
def save_hdf(self, filename, path='', overwrite=False, append=False):
"""Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
and object properties are also attached, so suitable for
re-loading via :func:`StarModel.load_hdf`.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated.
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
else:
store.close()
self.samples.to_hdf(filename, '{}/samples'.format(path))
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/samples'.format(path)).attrs
attrs.properties = self.properties
attrs.ic_type = type(self.ic)
attrs.maxAV = self.maxAV
attrs.max_distance = self.max_distance
attrs.min_logg = self.min_logg
attrs.use_emcee = self.use_emcee
attrs._mnest_basename = self._mnest_basename
attrs.name = self.name
store.close() | 0.003472 |
def run(self):
"""
Write the input/data files and run LAMMPS.
"""
lammps_cmd = self.lammps_bin + ['-in', self.input_filename]
print("Running: {}".format(" ".join(lammps_cmd)))
p = Popen(lammps_cmd, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
return stdout, stderr | 0.005814 |
def award_points(target, key, reason="", source=None):
"""
Awards target the point value for key. If key is an integer then it's a
one off assignment and should be interpreted as the actual point value.
"""
point_value, points = get_points(key)
if not ALLOW_NEGATIVE_TOTALS:
total = points_awarded(target)
if total + points < 0:
reason = reason + "(floored from {0} to 0)".format(points)
points = -total
apv = AwardedPointValue(points=points, value=point_value, reason=reason)
if isinstance(target, get_user_model()):
apv.target_user = target
lookup_params = {
"target_user": target
}
else:
apv.target_object = target
lookup_params = {
"target_content_type": apv.target_content_type,
"target_object_id": apv.target_object_id,
}
if source is not None:
if isinstance(source, get_user_model()):
apv.source_user = source
else:
apv.source_object = source
apv.save()
if not TargetStat.update_points(points, lookup_params):
try:
sid = transaction.savepoint()
TargetStat._default_manager.create(
**dict(lookup_params, points=points)
)
transaction.savepoint_commit(sid)
except IntegrityError:
transaction.savepoint_rollback(sid)
TargetStat.update_points(points, lookup_params)
signals.points_awarded.send(
sender=target.__class__,
target=target,
key=key,
points=points,
source=source
)
new_points = points_awarded(target)
old_points = new_points - points
TargetStat.update_positions((old_points, new_points))
return apv | 0.000556 |
def normalize(self):
"""
Returns a new table with values ranging from -1 to 1, reaching at least
one of these, unless there's no data.
"""
max_abs = max(self.table, key=abs)
if max_abs == 0:
raise ValueError("Can't normalize zeros")
return self / max_abs | 0.006944 |
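A worked example of the behaviour described in the docstring: dividing by the element with the largest magnitude (signed, as above) maps that element to exactly 1 and keeps everything else inside [-1, 1], flipping signs when the extreme element is negative. A standalone sketch on a plain list:

def normalize(table):
    max_abs = max(table, key=abs)
    if max_abs == 0:
        raise ValueError("Can't normalize zeros")
    return [x / max_abs for x in table]

print(normalize([2, -4, 1]))   # [-0.5, 1.0, -0.25] -- the largest-magnitude entry becomes 1
print(normalize([3, 1, -2]))   # [1.0, 0.333..., -0.666...]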
def encode(msg, strict=False, logger=None, timezone_offset=None):
"""
Encodes and returns the L{msg<Envelope>} as an AMF stream.
@param strict: Enforce strict encoding. Default is C{False}. Specifically
header/body lengths will be written correctly, instead of the default 0.
Default is `False`. Introduced in 0.4.
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: U{logging.Logger<http://
docs.python.org/library/logging.html#loggers>}
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: U{datetime.datetime.timedelta<http://
docs.python.org/library/datetime.html#datetime.timedelta}
@rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
"""
stream = util.BufferedByteStream()
encoder = pyamf.get_encoder(pyamf.AMF0, stream, strict=strict,
timezone_offset=timezone_offset)
if msg.amfVersion == pyamf.AMF3:
encoder.use_amf3 = True
stream.write_ushort(msg.amfVersion)
stream.write_ushort(len(msg.headers))
for name, header in msg.headers.iteritems():
_write_header(name, header, int(msg.headers.is_required(name)),
stream, encoder, strict)
stream.write_short(len(msg))
for name, message in msg.iteritems():
encoder.context.clear()
_write_body(name, message, stream, encoder, strict)
stream.seek(0)
return stream | 0.002488 |
def list_xz (archive, compression, cmd, verbosity, interactive):
"""List a XZ archive."""
cmdlist = [cmd]
cmdlist.append('-l')
if verbosity > 1:
cmdlist.append('-v')
cmdlist.append(archive)
return cmdlist | 0.008475 |
def reformat_cmd(self, text):
""" reformat the text to be stripped of noise """
# remove az if there
text = text.replace('az', '')
# disregard defaulting symbols
if text and SELECT_SYMBOL['scope'] == text[0:2]:
text = text.replace(SELECT_SYMBOL['scope'], "")
if self.shell_ctx.default_command:
text = self.shell_ctx.default_command + ' ' + text
return text | 0.004577 |
def _configure(configuration_details):
"""Adds alias to shell config."""
path = Path(configuration_details.path).expanduser()
with path.open('a') as shell_config:
shell_config.write(u'\n')
shell_config.write(configuration_details.content)
shell_config.write(u'\n') | 0.003333 |
def error(self, msg, file=None):
"""
Outputs the error msg to the file if specified, or to the
io_manager's stderr if available, or to sys.stderr.
"""
self.error_encountered = True
file.write(self.error_prefix)
file.write(msg)
file.write('\n')
file.flush() | 0.006098 |
def make_model(self):
"""Return the assembled HTML content as a string.
Returns
-------
str
The assembled HTML as a string.
"""
stmts_formatted = []
stmt_rows = group_and_sort_statements(self.statements,
self.ev_totals if self.ev_totals else None)
for key, verb, stmts in stmt_rows:
# This will now be ordered by prevalence and entity pairs.
stmt_info_list = []
for stmt in stmts:
stmt_hash = stmt.get_hash(shallow=True)
ev_list = self._format_evidence_text(stmt)
english = self._format_stmt_text(stmt)
if self.ev_totals:
total_evidence = self.ev_totals.get(int(stmt_hash), '?')
if total_evidence == '?':
logger.warning('The hash %s was not found in the '
'evidence totals dict.' % stmt_hash)
evidence_count_str = '%s / %s' % (len(ev_list), total_evidence)
else:
evidence_count_str = str(len(ev_list))
stmt_info_list.append({
'hash': stmt_hash,
'english': english,
'evidence': ev_list,
'evidence_count': evidence_count_str})
short_name = make_string_from_sort_key(key, verb)
short_name_key = str(uuid.uuid4())
stmts_formatted.append((short_name, short_name_key, stmt_info_list))
metadata = {k.replace('_', ' ').title(): v
for k, v in self.metadata.items()}
if self.db_rest_url and not self.db_rest_url.endswith('statements'):
db_rest_url = self.db_rest_url + '/statements'
else:
db_rest_url = '.'
self.model = template.render(stmt_data=stmts_formatted,
metadata=metadata, title=self.title,
db_rest_url=db_rest_url)
return self.model | 0.002376 |
def str_to_inet(address):
"""Convert an a string IP address to a inet struct
Args:
address (str): String representation of address
Returns:
inet: Inet network address
"""
# First try ipv4 and then ipv6
try:
return socket.inet_pton(socket.AF_INET, address)
except socket.error:
return socket.inet_pton(socket.AF_INET6, address) | 0.002481 |
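A quick round-trip check of the conversion using the standard library's inverse, socket.inet_ntop (the function is repeated so the example is self-contained; the addresses are illustrative):

import socket

def str_to_inet(address):
    try:
        return socket.inet_pton(socket.AF_INET, address)
    except socket.error:
        return socket.inet_pton(socket.AF_INET6, address)

packed_v4 = str_to_inet('192.168.1.10')   # 4 bytes
packed_v6 = str_to_inet('2001:db8::1')    # 16 bytes
print(len(packed_v4), socket.inet_ntop(socket.AF_INET, packed_v4))
print(len(packed_v6), socket.inet_ntop(socket.AF_INET6, packed_v6))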
async def send_from_directory(
directory: FilePath,
file_name: str,
*,
mimetype: Optional[str]=None,
as_attachment: bool=False,
attachment_filename: Optional[str]=None,
add_etags: bool=True,
cache_timeout: Optional[int]=None,
conditional: bool=True,
last_modified: Optional[datetime]=None,
) -> Response:
"""Send a file from a given directory.
Arguments:
directory: Directory that when combined with file_name gives
the file path.
file_name: File name that when combined with directory gives
the file path.
See :func:`send_file` for the other arguments.
"""
file_path = safe_join(directory, file_name)
if not file_path.is_file():
raise NotFound()
return await send_file(
file_path,
mimetype=mimetype,
as_attachment=as_attachment,
attachment_filename=attachment_filename,
add_etags=add_etags,
cache_timeout=cache_timeout,
conditional=conditional,
last_modified=last_modified,
) | 0.013636 |
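The safe_join call above is what blocks path traversal (e.g. file_name='../../etc/passwd'). A generic sketch of such a check, assuming pathlib only -- this is not Quart's actual safe_join implementation, and the directory/file names are illustrative:

from pathlib import Path

def safe_join(directory, filename):
    """Join filename onto directory, refusing to escape it (illustrative only)."""
    base = Path(directory).resolve()
    candidate = (base / filename).resolve()
    if base != candidate and base not in candidate.parents:
        raise ValueError("attempted path traversal: {}".format(filename))
    return candidate

print(safe_join('/srv/static', 'css/site.css'))   # /srv/static/css/site.css
# safe_join('/srv/static', '../../etc/passwd')    # raises ValueError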
def clear_avatar(self):
"""Clears the asset.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_avatar_metadata().is_read_only() or
self.get_avatar_metadata().is_required()):
raise errors.NoAccess()
self._my_map['avatarId'] = self._avatar_default | 0.005545 |
def password_validator(self, form, field):
"""Ensure that passwords have at least 6 characters with one lowercase letter, one uppercase letter and one number.
Override this method to customize the password validator.
"""
# Convert string to list of characters
password = list(field.data)
password_length = len(password)
# Count lowercase, uppercase and numbers
lowers = uppers = digits = 0
for ch in password:
if ch.islower(): lowers += 1
if ch.isupper(): uppers += 1
if ch.isdigit(): digits += 1
# Password must have one lowercase letter, one uppercase letter and one digit
is_valid = password_length >= 6 and lowers and uppers and digits
if not is_valid:
raise ValidationError(
_('Password must have at least 6 characters with one lowercase letter, one uppercase letter and one number')) | 0.008395 |
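The same policy can also be expressed as a single regular expression with lookaheads, which is sometimes handier for client-side or schema-level validation. An equivalent standalone check (not part of the framework above):

import re

PASSWORD_RE = re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*\d).{6,}$')

def is_valid_password(password):
    # At least 6 characters, with at least one lowercase letter,
    # one uppercase letter and one digit.
    return bool(PASSWORD_RE.match(password))

print(is_valid_password('Abc123'))   # True
print(is_valid_password('abcdef'))   # False -- no uppercase letter or digit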
def btox(data, sep=''):
"""Return the hex encoding of a blob (string)."""
# translate the blob into hex
hex_str = binascii.hexlify(data)
# inject the separator if it was given
if sep:
hex_str = sep.join(hex_str[i:i+2] for i in range(0, len(hex_str), 2))
# return the result
return hex_str | 0.003067 |
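Note that binascii.hexlify returns bytes on Python 3, so the str.join above only works as written on Python 2. A Python 3-friendly sketch of the same helper:

import binascii

def btox(data, sep=''):
    """Return the hex encoding of a bytes blob, optionally separated."""
    hex_str = binascii.hexlify(data).decode('ascii')
    if sep:
        hex_str = sep.join(hex_str[i:i + 2] for i in range(0, len(hex_str), 2))
    return hex_str

print(btox(b'\xde\xad\xbe\xef', sep=':'))  # de:ad:be:ef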
def compile_results(self):
"""Compile all results for the current test
"""
self._init_dataframes()
self.total_transactions = len(self.main_results['raw'])
self._init_dates() | 0.009346 |
def post_series_publish(self, id, **data):
"""
POST /series/:id/publish/
Publishes a repeating event series and all of its occurrences that are not already canceled or deleted. Once a date is cancelled it can still be uncancelled and can be viewed by the public. A deleted date cannot be undeleted and cannot by viewed by the public. In order for
publish to be permitted, the event must have all necessary information, including a name and description, an organizer,
at least one ticket, and valid payment options. This API endpoint will return argument errors for event fields that
fail to validate the publish requirements. Returns a boolean indicating success or failure of the publish.
field_error event.name MISSING
Your event must have a name to be published.
field_error event.start MISSING
Your event must have a start date to be published.
field_error event.end MISSING
Your event must have an end date to be published.
field_error event.start.timezone MISSING
Your event start and end dates must have matching time zones to be published.
field_error event.organizer MISSING
Your event must have an organizer to be published.
field_error event.currency MISSING
Your event must have a currency to be published.
field_error event.currency INVALID
Your event must have a valid currency to be published.
field_error event.tickets MISSING
Your event must have at least one ticket to be published.
field_error event.tickets.N.name MISSING
All tickets must have names in order for your event to be published. The N will be the ticket class ID with the
error.
field_error event.tickets.N.quantity_total MISSING
All non-donation tickets must have an available quantity value in order for your event to be published. The N
will be the ticket class ID with the error.
field_error event.tickets.N.cost MISSING
All non-donation tickets must have a cost (which can be ``0.00`` for free tickets) in order for your event to
be published. The N will be the ticket class ID with the error.
.. _unpublish-series-by-id:
"""
return self.post("/series/{0}/publish/".format(id), data=data) | 0.004669 |