text (stringlengths 78 to 104k) | score (float64 0 to 0.18) |
---|---|
def encode(self):
"""Encode the DAT packet based on instance variables, populating
self.buffer, returning self."""
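        # struct format: network byte order ('!'), two unsigned shorts for the
        # opcode and error code ('HH'), the error message bytes ('%ds'), and a
        # trailing NUL pad byte ('x')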
fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode])
log.debug("encoding ERR packet with fmt %s", fmt)
self.buffer = struct.pack(fmt,
self.opcode,
self.errorcode,
self.errmsgs[self.errorcode])
return self | 0.004264 |
def __get_keywords(self):
"""
        Get all the keywords related to this page
        Yields:
            The keywords, one string (word) at a time
"""
txt = self.text
for line in txt:
for word in split_words(line):
yield(word) | 0.007435 |
def pop(self, count=None):
        '''Pop up to `count` items off this queue as Job objects. If `count`
        is None, a single Job is returned (or None if nothing was popped);
        otherwise a list of Jobs is returned.'''
results = [Job(self.client, **job) for job in json.loads(
self.client('pop', self.name, self.worker_name, count or 1))]
if count is None:
return (len(results) and results[0]) or None
return results | 0.00431 |
def distance_landscape_as_3d_data(self, x_axis, y_axis):
"""
Returns the distance landscape as three-dimensional data for the specified projection.
:param x_axis: variable to be plotted on the x axis of projection
:param y_axis: variable to be plotted on the y axis of projection
:return: a 3-tuple (x, y, z) where x and y are the lists of coordinates and z the list of distances at
respective coordinates
"""
if not self.distance_landscape:
raise Exception('No distance landscape returned. Re-run inference with return_distance_landscape=True')
index_x = self.parameter_index(x_axis)
index_y = self.parameter_index(y_axis)
x = []
y = []
z = []
for parameters, initial_conditions, distance in self.distance_landscape:
all_values = list(parameters) + list(initial_conditions)
x.append(all_values[index_x])
y.append(all_values[index_y])
z.append(distance)
return x, y, z | 0.005655 |
def set(self, key: URIRef, value: Union[Literal, BNode, URIRef, str, int], lang: Optional[str]=None):
""" Set the VALUE for KEY predicate in the Metadata Graph
:param key: Predicate to be set (eg. DCT.creator)
:param value: Value to be stored (eg. "Cicero")
:param lang: [Optional] Language of the value (eg. "la")
"""
if not isinstance(value, Literal) and lang is not None:
value = Literal(value, lang=lang)
elif not isinstance(value, (BNode, URIRef)):
value, _type = term._castPythonToLiteral(value)
if _type is None:
value = Literal(value)
else:
value = Literal(value, datatype=_type)
self.graph.set((self.asNode(), key, value)) | 0.006443 |
def safe_gz_unzip(contents):
''' Takes a file's contents passed as a string (contents) and either gz-unzips the contents and returns the uncompressed data or else returns the original contents.
    This function raises an exception if passed what appears to be gz-zipped data (judging by the magic number) but gzip fails to decompress the contents.
A cleaner method would use zlib directly rather than writing a temporary file but zlib.decompress(contents, 16+zlib.MAX_WBITS) fix did not work for me immediately and I had things to get done!'''
if len(contents) > 1 and ord(contents[0]) == 31 and ord(contents[1]) == 139:
#contents = zlib.decompress(contents, 16+zlib.MAX_WBITS)
fname = write_temp_file('/tmp', contents)
try:
f = gzip.open(fname, 'rb')
contents = f.read()
f.close()
except:
os.remove(fname)
raise
return contents
else:
return contents | 0.007143 |
def density(self, *args):
""" Mean density in g/cc
"""
M = self.mass(*args) * MSUN
V = 4./3 * np.pi * (self.radius(*args) * RSUN)**3
return M/V | 0.016304 |
def pause(self, scaling_group):
"""
Pauses all execution of the policies for the specified scaling group.
"""
uri = "/%s/%s/pause" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_post(uri)
return None | 0.007117 |
def module_definition_from_mirteFile_dict(man, d):
""" Creates a ModuleDefinition instance from the dictionary <d> from
a mirte-file for the Manager instance <man>. """
m = ModuleDefinition()
if 'inherits' not in d:
d['inherits'] = list()
if 'settings' not in d:
d['settings'] = dict()
if 'implementedBy' in d:
m.implementedBy = d['implementedBy']
m.inherits = set(d['inherits'])
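    # merge dependencies, settings, inherited modules and the run flag from
    # every parent module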
for p in d['inherits']:
if p not in man.modules:
raise ValueError("No such module %s" % p)
m.deps.update(man.modules[p].deps)
m.vsettings.update(man.modules[p].vsettings)
m.inherits.update(man.modules[p].inherits)
m.run = m.run or man.modules[p].run
if 'run' in d:
m.run = d['run']
if len(m.inherits) == 0:
m.inherits = set(['module'])
for k, v in six.iteritems(d['settings']):
if 'type' not in v:
if k not in m.vsettings:
raise ValueError("No such existing vsetting %s" % k)
if 'default' in v:
m.vsettings[k] = copy.copy(m.vsettings[k])
m.vsettings[k].default = v['default']
continue
if v['type'] in man.modules:
m.deps[k] = DepDefinition(v['type'], v.get('allownull', False))
elif v['type'] in man.valueTypes:
m.vsettings[k] = VSettingDefinition(
v['type'],
(man.valueTypes[v['type']](v['default'])
if 'default' in v else None)
)
else:
raise ValueError("No such module or valuetype %s" % v)
return m | 0.000611 |
def off(self, group):
"""Turn the LED off for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 0),
loop=self._loop) | 0.010929 |
def reindex(config):
"""
Recreate the Search Index.
"""
request = config.task(_reindex).get_request()
config.task(_reindex).run(request) | 0.006369 |
def _create_validation_error(self,
name, # type: str
value, # type: Any
validation_outcome=None, # type: Any
error_type=None, # type: Type[ValidationError]
help_msg=None, # type: str
**kw_context_args):
""" The function doing the final error raising. """
# first merge the info provided in arguments and in self
error_type = error_type or self.error_type
help_msg = help_msg or self.help_msg
ctx = copy(self.kw_context_args)
ctx.update(kw_context_args)
# allow the class to override the name
name = self._get_name_for_errors(name)
if issubclass(error_type, TypeError) or issubclass(error_type, ValueError):
# this is most probably a custom error type, it is already annotated with ValueError and/or TypeError
# so use it 'as is'
new_error_type = error_type
else:
# Add the appropriate TypeError/ValueError base type dynamically
additional_type = None
if isinstance(validation_outcome, Exception):
if is_error_of_type(validation_outcome, TypeError):
additional_type = TypeError
elif is_error_of_type(validation_outcome, ValueError):
additional_type = ValueError
if additional_type is None:
# not much we can do here, let's assume a ValueError, that is more probable
additional_type = ValueError
new_error_type = add_base_type_dynamically(error_type, additional_type)
# then raise the appropriate ValidationError or subclass
return new_error_type(validator=self, var_value=value, var_name=name, validation_outcome=validation_outcome,
help_msg=help_msg, **ctx) | 0.006839 |
def negative_directional_movement(high_data, low_data):
"""
Negative Directional Movement (-DM).
    -DM: if DWNMOVE > UPMOVE and DWNMOVE > 0 then -DM = DWNMOVE else -DM = 0
"""
catch_errors.check_for_input_len_diff(high_data, low_data)
up_moves = calculate_up_moves(high_data)
down_moves = calculate_down_moves(low_data)
ndm = []
for idx in range(0, len(down_moves)):
if down_moves[idx] > up_moves[idx] and down_moves[idx] > 0:
ndm.append(down_moves[idx])
else:
ndm.append(0)
return ndm | 0.001764 |
def _init_browser(self):
"""
        Override this method with the appropriate way to prepare a logged in
browser.
"""
self.browser = mechanize.Browser()
self.browser.set_handle_robots(False)
self.browser.open(self.server_url + "/youraccount/login")
self.browser.select_form(nr=0)
try:
self.browser['nickname'] = self.user
self.browser['password'] = self.password
except:
self.browser['p_un'] = self.user
self.browser['p_pw'] = self.password
# Set login_method to be writable
self.browser.form.find_control('login_method').readonly = False
self.browser['login_method'] = self.login_method
self.browser.submit() | 0.003937 |
def create_gtr(params):
"""
parse the arguments referring to the GTR model and return a GTR structure
"""
model = params.gtr
gtr_params = params.gtr_params
if model == 'infer':
gtr = GTR.standard('jc', alphabet='aa' if params.aa else 'nuc')
else:
try:
kwargs = {}
if gtr_params is not None:
for param in gtr_params:
keyval = param.split('=')
if len(keyval)!=2: continue
if keyval[0] in ['pis', 'pi', 'Pi', 'Pis']:
keyval[0] = 'pi'
keyval[1] = list(map(float, keyval[1].split(',')))
elif keyval[0] not in ['alphabet']:
keyval[1] = float(keyval[1])
kwargs[keyval[0]] = keyval[1]
else:
print ("GTR params are not specified. Creating GTR model with default parameters")
gtr = GTR.standard(model, **kwargs)
infer_gtr = False
except:
print ("Could not create GTR model from input arguments. Using default (Jukes-Cantor 1969)")
gtr = GTR.standard('jc', alphabet='aa' if params.aa else 'nuc')
infer_gtr = False
return gtr | 0.006299 |
def update(self):
"""Update RAM memory stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab MEM using the psutil virtual_memory method
vm_stats = psutil.virtual_memory()
# Get all the memory stats (copy/paste of the psutil documentation)
# total: total physical memory available.
# available: the actual amount of available memory that can be given instantly to processes that request more memory in bytes; this is calculated by summing different memory values depending on the platform (e.g. free + buffers + cached on Linux) and it is supposed to be used to monitor actual memory usage in a cross platform fashion.
# percent: the percentage usage calculated as (total - available) / total * 100.
# used: memory used, calculated differently depending on the platform and designed for informational purposes only.
# free: memory not being used at all (zeroed) that is readily available; note that this doesn’t reflect the actual memory available (use ‘available’ instead).
# Platform-specific fields:
# active: (UNIX): memory currently in use or very recently used, and so it is in RAM.
# inactive: (UNIX): memory that is marked as not used.
# buffers: (Linux, BSD): cache for things like file system metadata.
# cached: (Linux, BSD): cache for various things.
# wired: (BSD, macOS): memory that is marked to always stay in RAM. It is never moved to disk.
# shared: (BSD): memory that may be simultaneously accessed by multiple processes.
self.reset()
for mem in ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached',
'wired', 'shared']:
if hasattr(vm_stats, mem):
stats[mem] = getattr(vm_stats, mem)
# Use the 'free'/htop calculation
# free=available+buffer+cached
stats['free'] = stats['available']
            if 'buffers' in stats:
                stats['free'] += stats['buffers']
            if 'cached' in stats:
                stats['free'] += stats['cached']
# used=total-free
stats['used'] = stats['total'] - stats['free']
elif self.input_method == 'snmp':
# Update stats using SNMP
if self.short_system_name in ('windows', 'esxi'):
# Mem stats for Windows|Vmware Esxi are stored in the FS table
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
self.reset()
else:
for fs in fs_stat:
# The Physical Memory (Windows) or Real Memory (VMware)
# gives statistics on RAM usage and availability.
if fs in ('Physical Memory', 'Real Memory'):
stats['total'] = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
stats['used'] = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
stats['percent'] = float(stats['used'] * 100 / stats['total'])
stats['free'] = stats['total'] - stats['used']
break
else:
                # Default behavior for other OSes
stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
if stats['total'] == '':
self.reset()
return self.stats
for key in iterkeys(stats):
if stats[key] != '':
stats[key] = float(stats[key]) * 1024
# Use the 'free'/htop calculation
stats['free'] = stats['free'] - stats['total'] + (stats['buffers'] + stats['cached'])
# used=total-free
stats['used'] = stats['total'] - stats['free']
# percent: the percentage usage calculated as (total - available) / total * 100.
stats['percent'] = float((stats['total'] - stats['free']) / stats['total'] * 100)
# Update the stats
self.stats = stats
return self.stats | 0.003717 |
async def open(self):
"""Open handler connection and authenticate session.
If the handler is already open, this operation will do nothing.
A handler opened with this method must be explicitly closed.
It is recommended to open a handler within a context manager as
opposed to calling the method directly.
.. note:: This operation is not thread-safe.
Example:
.. literalinclude:: ../examples/async_examples/test_examples_async.py
:start-after: [START open_close_sender_directly]
:end-before: [END open_close_sender_directly]
:language: python
:dedent: 4
:caption: Explicitly open and close a Sender.
"""
if self.running:
return
self.running = True
try:
await self._handler.open_async(connection=self.connection)
while not await self._handler.client_ready_async():
await asyncio.sleep(0.05)
except Exception as e: # pylint: disable=broad-except
try:
await self._handle_exception(e)
except:
self.running = False
raise | 0.003252 |
def find_definition(project, code, offset, resource=None, maxfixes=1):
"""Return the definition location of the python name at `offset`
A `Location` object is returned if the definition location can be
determined, otherwise ``None`` is returned.
"""
fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes)
pyname = fixer.pyname_at(offset)
if pyname is not None:
module, lineno = pyname.get_definition_location()
name = rope.base.worder.Worder(code).get_word_at(offset)
if lineno is not None:
start = module.lines.get_line_start(lineno)
def check_offset(occurrence):
if occurrence.offset < start:
return False
pyname_filter = occurrences.PyNameFilter(pyname)
finder = occurrences.Finder(project, name,
[check_offset, pyname_filter])
for occurrence in finder.find_occurrences(pymodule=module):
return Location(occurrence) | 0.000969 |
def traceroute(self,
destination,
source=C.TRACEROUTE_SOURCE,
ttl=C.TRACEROUTE_TTL,
timeout=C.TRACEROUTE_TIMEOUT,
vrf=C.TRACEROUTE_VRF):
"""Execute traceroute and return results."""
traceroute_result = {}
# calling form RPC does not work properly :(
# but defined junos_route_instance_table just in case
source_str = ''
maxttl_str = ''
wait_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
wait_str = ' wait {timeout}'.format(timeout=timeout)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
destination=destination,
source=source_str,
maxttl=maxttl_str,
wait=wait_str,
vrf=vrf_str
)
traceroute_rpc = E('command', traceroute_command)
rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
# make direct RPC call via NETCONF
traceroute_results = rpc_reply.find('.//traceroute-results')
traceroute_failure = napalm_base.helpers.find_txt(
traceroute_results, 'traceroute-failure', '')
error_message = napalm_base.helpers.find_txt(
traceroute_results, 'rpc-error/error-message', '')
if traceroute_failure and error_message:
return {'error': '{}: {}'.format(traceroute_failure, error_message)}
traceroute_result['success'] = {}
for hop in traceroute_results.findall('hop'):
ttl_value = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
if ttl_value not in traceroute_result['success']:
traceroute_result['success'][ttl_value] = {'probes': {}}
for probe in hop.findall('probe-result'):
probe_index = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
host_name = py23_compat.text_type(
napalm_base.helpers.find_txt(probe, 'host-name', '*'))
rtt = napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3 # ms
traceroute_result['success'][ttl_value]['probes'][probe_index] = {
'ip_address': ip_address,
'host_name': host_name,
'rtt': rtt
}
return traceroute_result | 0.004089 |
def unpack_bitstring(length, is_float, is_signed, bits):
# type: (int, bool, bool, typing.Any) -> typing.Union[float, int]
"""
returns a value calculated from bits
:param length: length of signal in bits
:param is_float: value is float
:param bits: value as bits (array/iterable)
:param is_signed: value is signed
:return:
"""
if is_float:
types = {
32: '>f',
64: '>d'
}
float_type = types[length]
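        # pack the bit characters into bytes (8 bits per byte) and decode the
        # result as a big-endian IEEE float or double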
value, = struct.unpack(float_type, bytearray(int(''.join(b), 2) for b in grouper(bits, 8)))
else:
value = int(bits, 2)
if is_signed and bits[0] == '1':
value -= (1 << len(bits))
return value | 0.004138 |
def to_unicode(sorb, allow_eval=False):
r"""Ensure that strings are unicode (UTF-8 encoded).
Evaluate bytes literals that are sometimes accidentally created by str(b'whatever')
>>> to_unicode(b'whatever')
'whatever'
>>> to_unicode(b'b"whatever"')
'whatever'
>>> to_unicode(repr(b'b"whatever"'))
'whatever'
>>> to_unicode(str(b'b"whatever"'))
'whatever'
>>> to_unicode(str(str(b'whatever')))
'whatever'
>>> to_unicode(bytes(u'whatever', 'utf-8'))
'whatever'
>>> to_unicode(b'u"whatever"')
'whatever'
>>> to_unicode(u'b"whatever"')
'whatever'
There seems to be a bug in python3 core:
>>> str(b'whatever') # user intended str.decode(b'whatever') (str coercion) rather than python code repr
"b'whatever'"
>>> repr(str(b'whatever'))
'"b\'whatever\'"'
>>> str(repr(str(b'whatever')))
'"b\'whatever\'"'
>>> repr(str(repr(str(b'whatever'))))
'\'"b\\\'whatever\\\'"\''
>>> repr(repr(b'whatever'))
'"b\'whatever\'"'
>>> str(str(b'whatever'))
"b'whatever'"
>>> str(repr(b'whatever'))
"b'whatever'"
"""
if sorb is None:
return sorb
if isinstance(sorb, bytes):
sorb = sorb.decode()
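    # strip accidental literal wrappers such as b'...' or u"..." (e.g. produced
    # by str(b'...')) by evaluating the literal with builtins disabled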
for i, s in enumerate(["b'", 'b"', "u'", 'u"']):
if (sorb.startswith(s) and sorb.endswith(s[-1])):
# print(i)
return to_unicode(eval(sorb, {'__builtins__': None}, {}))
return sorb | 0.002066 |
def change_mime(self, bucket, key, mime):
"""修改文件mimeType:
主动修改指定资源的文件类型,具体规格参考:
http://developer.qiniu.com/docs/v6/api/reference/rs/chgm.html
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
mime: 待操作文件目标mimeType
"""
resource = entry(bucket, key)
encode_mime = urlsafe_base64_encode(mime)
return self.__rs_do('chgm', resource, 'mime/{0}'.format(encode_mime)) | 0.004396 |
def prepack(self, namedstruct, skip_self=False, skip_sub=False):
'''
Run prepack
'''
if not skip_self and self.prepackfunc is not None:
self.prepackfunc(namedstruct) | 0.009569 |
def convert(csv, json, **kwargs):
'''Convert csv to json.
csv: filename or file-like object
json: filename or file-like object
if csv is '-' or None:
stdin is used for input
if json is '-' or None:
stdout is used for output
'''
csv_local, json_local = None, None
try:
if csv == '-' or csv is None:
csv = sys.stdin
elif isinstance(csv, str):
csv = csv_local = open(csv, 'r')
if json == '-' or json is None:
json = sys.stdout
elif isinstance(json, str):
json = json_local = open(json, 'w')
data = load_csv(csv, **kwargs)
save_json(data, json, **kwargs)
finally:
if csv_local is not None:
csv_local.close()
if json_local is not None:
json_local.close() | 0.001183 |
def main():
'''This is the main function of this script.
The current script args are shown below ::
Usage: checkplotlist [-h] [--search SEARCH] [--sortby SORTBY]
[--filterby FILTERBY] [--splitout SPLITOUT]
[--outprefix OUTPREFIX] [--maxkeyworkers MAXKEYWORKERS]
{pkl,png} cpdir
This makes a checkplot file list for use with the checkplot-viewer.html
(for checkplot PNGs) or the checkplotserver.py (for checkplot pickles)
webapps.
positional arguments:
{pkl,png} type of checkplot to search for: pkl -> checkplot
pickles, png -> checkplot PNGs
cpdir directory containing the checkplots to process
optional arguments:
-h, --help show this help message and exit
--search SEARCH file glob prefix to use when searching for checkplots,
default: '*checkplot*', (the extension is added
automatically - .png or .pkl)
--sortby SORTBY the sort key and order to use when sorting
--filterby FILTERBY the filter key and condition to use when filtering.
you can specify this multiple times to filter by
several keys at once. all filters are joined with a
logical AND operation in the order they're given.
--splitout SPLITOUT if there are more than SPLITOUT objects in the target
directory (default: 5000), checkplotlist will split
the output JSON into multiple files. this helps keep
the checkplotserver webapp responsive.
--outprefix OUTPREFIX
a prefix string to use for the output JSON file(s).
use this to separate out different sort orders or
filter conditions, for example. if this isn't
provided, but --sortby or --filterby are, will use
those to figure out the output files' prefixes
--maxkeyworkers MAXKEYWORKERS
the number of parallel workers that will be launched
to retrieve checkplot key values used for sorting and
filtering (default: 2)
'''
####################
## PARSE THE ARGS ##
####################
aparser = argparse.ArgumentParser(
epilog=PROGEPILOG,
description=PROGDESC,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
aparser.add_argument(
'cptype',
action='store',
choices=['pkl','png'],
type=str,
help=("type of checkplot to search for: pkl -> checkplot pickles, "
"png -> checkplot PNGs")
)
aparser.add_argument(
'cpdir',
action='store',
type=str,
help=("directory containing the checkplots to process")
)
# TODO: here, make the --search kwarg an array (i.e. allow multiple search
# statements). the use of this will be to make checkplotserver able to load
# more than one checkplot per object (i.e. different mag types -- epd
# vs. tfa -- or different bands -- r vs. i -- at the SAME time).
# TODO: we'll fix checkplotserver and its js so there's a vertical tab
# column between the left period/epoch/tags panel and main
# periodogram/phased-LCs panel on the right. the user will be able to flip
# between tabs to look at the object in all loaded alternative checkplots.
# TODO: need to also think about to sort/filter; for now let's make it so
# the sorting works on a chosen checkplot search list, if we give --search
# 'checkplot*iep1' and --search 'checkplot*itf1', specify --sortpkls and
# --filterpkls kwargs, which match the given globs for the --search
# kwargs. e.g. we'd specify --sortpkls 'checkplot*iep1' to sort everything
# by the specified --sortby values in those pickles.
# TODO: we'll have to change the output JSON so it's primarily by objectid
# instead of checkplot filenames. each objectid will have its own list of
# checkplots to use for the frontend.
aparser.add_argument(
'--search',
action='store',
default='*checkplot*',
type=str,
help=("file glob prefix to use when searching for checkplots, "
"default: '%(default)s', "
"(the extension is added automatically - .png or .pkl)")
)
aparser.add_argument(
'--sortby',
action='store',
type=str,
help=("the sort key and order to use when sorting")
)
aparser.add_argument(
'--filterby',
action='append',
type=str,
help=("the filter key and condition to use when filtering. "
"you can specify this multiple times to filter by "
"several keys at once. all filters are joined with a "
"logical AND operation in the order they're given.")
)
aparser.add_argument(
'--splitout',
action='store',
type=int,
default=5000,
help=("if there are more than SPLITOUT objects in "
"the target directory (default: %(default)s), "
"checkplotlist will split the output JSON into multiple files. "
"this helps keep the checkplotserver webapp responsive.")
)
aparser.add_argument(
'--outprefix',
action='store',
type=str,
help=("a prefix string to use for the output JSON file(s). "
"use this to separate out different sort orders "
"or filter conditions, for example. "
"if this isn't provided, but --sortby or --filterby are, "
"will use those to figure out the output files' prefixes")
)
aparser.add_argument(
'--maxkeyworkers',
action='store',
type=int,
default=int(CPU_COUNT/4.0),
help=("the number of parallel workers that will be launched "
"to retrieve checkplot key values used for "
"sorting and filtering (default: %(default)s)")
)
args = aparser.parse_args()
checkplotbasedir = args.cpdir
fileglob = args.search
splitout = args.splitout
outprefix = args.outprefix if args.outprefix else None
# see if there's a sorting order
if args.sortby:
sortkey, sortorder = args.sortby.split('|')
if outprefix is None:
outprefix = args.sortby
else:
sortkey, sortorder = 'objectid', 'asc'
# see if there's a filter condition
if args.filterby:
filterkeys, filterconditions = [], []
# load all the filters
for filt in args.filterby:
f = filt.split('|')
filterkeys.append(f[0])
filterconditions.append(f[1])
# generate the output file's prefix
if outprefix is None:
outprefix = '-'.join(args.filterby)
else:
outprefix = '%s-%s' % ('-'.join(args.filterby), outprefix)
else:
filterkeys, filterconditions = None, None
if args.cptype == 'pkl':
checkplotext = 'pkl'
elif args.cptype == 'png':
checkplotext = 'png'
else:
print("unknown format for checkplots: %s! can't continue!"
% args.cptype)
sys.exit(1)
#######################
## NOW START WORKING ##
#######################
currdir = os.getcwd()
checkplotglob = os.path.join(checkplotbasedir,
'%s.%s' % (fileglob, checkplotext))
print('searching for checkplots: %s' % checkplotglob)
searchresults = glob.glob(checkplotglob)
if searchresults:
print('found %s checkplot files in dir: %s' %
(len(searchresults), checkplotbasedir))
# see if we should sort the searchresults in some special order
        # this requires an arg on the commandline of the form:
        # '<sortkey>|<sortorder>' (e.g. 'objectid|asc')
        # where sortkey is some key in the checkplot pickle:
        # this can be a simple key: e.g. objectid
        # or it can be a composite key: e.g. varinfo.varfeatures.stetsonj
        # and sortorder is either 'asc' or 'desc' for ascending/descending sort
        # we only support a single condition per filter; conditions are of the
        # form: '<filterkey>|<condition>@<operand>' where <condition> is one of:
        # 'ge', 'gt', 'le', 'lt', 'eq' and <operand> is a string, float, or int
        # to use when applying <condition>
# first, take care of sort keys
sortdone = False
# second, take care of any filters
filterok = False
filterstatements = []
# make sure we only run these operations on checkplot pickles
if ((args.cptype == 'pkl') and
((sortkey and sortorder) or (filterkeys and filterconditions))):
keystoget = []
# handle sorting
if (sortkey and sortorder):
print('sorting checkplot pickles by %s in order: %s' %
(sortkey, sortorder))
# dereference the sort key
sortkeys = sortkey.split('.')
# if there are any integers in the sortkeys strings, interpret
# these to mean actual integer indexes of lists or integer keys
# for dicts this allows us to move into arrays easily by
# indexing them
if sys.version_info[:2] < (3,4):
sortkeys = [(int(x) if x.isdigit() else x)
for x in sortkeys]
else:
sortkeys = [(int(x) if x.isdecimal() else x)
for x in sortkeys]
keystoget.append(sortkeys)
# handle filtering
if (filterkeys and filterconditions):
print('filtering checkplot pickles by %s using: %s' %
(filterkeys, filterconditions))
# add all the filtkeys to the list of keys to get
for fdk in filterkeys:
# dereference the filter dict key
fdictkeys = fdk.split('.')
fdictkeys = [(int(x) if x.isdecimal() else x)
for x in fdictkeys]
keystoget.append(fdictkeys)
print('retrieving checkplot info using %s workers...'
% args.maxkeyworkers)
# launch the key retrieval
pool = mp.Pool(args.maxkeyworkers)
tasks = [(x, keystoget) for x in searchresults]
keytargets = pool.map(checkplot_infokey_worker, tasks)
pool.close()
pool.join()
# now that we have keys, we need to use them
# keys will be returned in the order we put them into keystoget
# if keystoget is more than 1 element, then it's either sorting
# followed by filtering (multiple)...
if (len(keystoget) > 1 and
(sortkey and sortorder) and
(filterkeys and filterconditions)):
# the first elem is sort key targets
sorttargets = [x[0] for x in keytargets]
# all of the rest are filter targets
filtertargets = [x[1:] for x in keytargets]
# otherwise, it's just multiple filters
elif (len(keystoget) > 1 and
(not (sortkey and sortorder)) and
(filterkeys and filterconditions)):
sorttargets = None
filtertargets = keytargets
# if there's only one element in keytoget, then it's either just a
# sort target...
elif (len(keystoget) == 1 and
(sortkey and sortorder) and
(not(filterkeys and filterconditions))):
sorttargets = keytargets
filtertargets = None
# or it's just a filter target
elif (len(keystoget) == 1 and
(filterkeys and filterconditions) and
(not(sortkey and sortorder))):
sorttargets = None
filtertargets = keytargets
# turn the search results into an np.array before we do
# sorting/filtering
searchresults = np.array(searchresults)
if sorttargets:
sorttargets = np.ravel(np.array(sorttargets))
sortind = np.argsort(sorttargets)
if sortorder == 'desc':
sortind = sortind[::-1]
# sort the search results in the requested order
searchresults = searchresults[sortind]
sortdone = True
if filtertargets:
# don't forget to also sort the filtertargets in the same order
# as sorttargets so we can get the correct objects to filter.
# now figure out the filter conditions: <condition>@<operand>
# where <condition> is one of: 'ge', 'gt', 'le', 'lt', 'eq' and
# <operand> is a string, float, or int to use when applying
# <condition>
finalfilterind = []
for ind, fcond in enumerate(filterconditions):
thisftarget = np.array([x[ind] for x in filtertargets])
if (sortdone):
thisftarget = thisftarget[sortind]
try:
foperator, foperand = fcond.split('@')
foperator = FILTEROPS[foperator]
# we'll do a straight eval of the filter
# yes: this is unsafe
filterstr = (
'np.isfinite(thisftarget) & (thisftarget %s %s)' %
(foperator, foperand)
)
filterind = eval(filterstr)
# add this filter to the finalfilterind
finalfilterind.append(filterind)
# update the filterstatements
filterstatements.append('%s %s %s' % (filterkeys[ind],
foperator,
foperand))
except Exception as e:
print('ERR! could not understand filter spec: %s'
'\nexception was: %s' %
(args.filterby[ind], e))
print('WRN! not applying broken filter')
#
# DONE with evaluating each filter, get final results below
#
# column stack the overall filter ind
finalfilterind = np.column_stack(finalfilterind)
# do a logical AND across the rows
finalfilterind = np.all(finalfilterind, axis=1)
# these are the final results after ANDing all the filters
filterresults = searchresults[finalfilterind]
# make sure we got some results
if filterresults.size > 0:
print('filters applied: %s -> objects found: %s ' %
(repr(args.filterby), filterresults.size))
searchresults = filterresults
filterok = True
# otherwise, applying all of the filters killed everything
else:
print('WRN! filtering failed! %s -> ZERO objects found!' %
(repr(args.filterby), ))
print('WRN! not applying any filters')
# all done with sorting and filtering
# turn the searchresults back into a list
searchresults = searchresults.tolist()
# if there's no special sort order defined, use the usual sort order
# at the end after filtering
if not(sortkey and sortorder):
print('WRN! no special sort key and order/'
'filter key and condition specified, '
'sorting checkplot pickles '
'using usual alphanumeric sort...')
searchresults = sorted(searchresults)
sortkey = 'filename'
sortorder = 'asc'
nchunks = int(len(searchresults)/splitout) + 1
searchchunks = [searchresults[x*splitout:x*splitout+splitout] for x
in range(nchunks)]
if nchunks > 1:
print('WRN! more than %s checkplots in final list, '
'splitting into %s chunks' % (splitout, nchunks))
        # if the filter failed, zero out the filter statements
if (filterkeys and filterconditions) and not filterok:
filterstatements = []
# generate the output
for chunkind, chunk in enumerate(searchchunks):
# figure out if we need to split the JSON file
outjson = os.path.abspath(
os.path.join(
currdir,
'%scheckplot-filelist%s.json' % (
('%s-' % outprefix if outprefix is not None else ''),
('-%02i' % chunkind if len(searchchunks) > 1 else ''),
)
)
)
outjson = outjson.replace('|','_')
outjson = outjson.replace('@','_')
# ask if the checkplot list JSON should be updated
if os.path.exists(outjson):
if sys.version_info[:2] < (3,0):
answer = raw_input(
'There is an existing '
'checkplot list file in this '
'directory:\n %s\nDo you want to '
'overwrite it completely? (default: no) [y/n] ' %
outjson
)
else:
answer = input(
'There is an existing '
'checkplot list file in this '
'directory:\n %s\nDo you want to '
'overwrite it completely? (default: no) [y/n] ' %
outjson
)
# if it's OK to overwrite, then do so
if answer and answer == 'y':
with open(outjson,'w') as outfd:
print('WRN! completely overwriting '
'existing checkplot list %s' % outjson)
outdict = {
'checkplots':chunk,
'nfiles':len(chunk),
'sortkey':sortkey,
'sortorder':sortorder,
'filterstatements':filterstatements
}
json.dump(outdict,outfd)
# if it's not OK to overwrite, then
else:
# read in the outjson, and add stuff to it for objects that
# don't have an entry
print('only updating existing checkplot list '
'file with any new checkplot pickles')
with open(outjson,'r') as infd:
indict = json.load(infd)
# update the checkplot list, sortorder, and sortkey only
indict['checkplots'] = chunk
indict['nfiles'] = len(chunk)
indict['sortkey'] = sortkey
indict['sortorder'] = sortorder
indict['filterstatements'] = filterstatements
                    # write the updated dict back to the file
with open(outjson,'w') as outfd:
json.dump(indict, outfd)
# if this is a new output file
else:
with open(outjson,'w') as outfd:
outdict = {'checkplots':chunk,
'nfiles':len(chunk),
'sortkey':sortkey,
'sortorder':sortorder,
'filterstatements':filterstatements}
json.dump(outdict,outfd)
if os.path.exists(outjson):
print('checkplot file list written to %s' % outjson)
else:
print('ERR! writing the checkplot file list failed!')
else:
print('ERR! no checkplots found in %s' % checkplotbasedir) | 0.002236 |
def dump_pytorch_graph(graph):
"""List all the nodes in a PyTorch graph."""
f = "{:25} {:40} {} -> {}"
print(f.format("kind", "scopeName", "inputs", "outputs"))
for node in graph.nodes():
print(f.format(node.kind(), node.scopeName(),
[i.unique() for i in node.inputs()],
[i.unique() for i in node.outputs()]
)) | 0.002469 |
def pct_decode(s):
"""
Return the percent-decoded version of string s.
>>> pct_decode('%43%6F%75%63%6F%75%2C%20%6A%65%20%73%75%69%73%20%63%6F%6E%76%69%76%69%61%6C')
'Coucou, je suis convivial'
>>> pct_decode('')
''
>>> pct_decode('%2525')
'%25'
"""
if s is None:
return None
elif not isinstance(s, unicode):
s = str(s)
else:
s = s.encode('utf8')
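    # PERCENT_CODE_SUB is assumed to be the .sub method of a compiled regex
    # matching %XX escapes; each match is replaced by its decoded character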
return PERCENT_CODE_SUB(lambda mo: chr(int(mo.group(0)[1:], 16)), s) | 0.004065 |
def config(path=None, root=None, db=None):
"""Return the default run_config object for this installation."""
import ambry.run
return ambry.run.load(path=path, root=root, db=db) | 0.005319 |
def _set_mpls_adjust_bandwidth_lsp_all(self, v, load=False):
"""
Setter method for mpls_adjust_bandwidth_lsp_all, mapped from YANG variable /brocade_mpls_rpc/mpls_adjust_bandwidth_lsp_all (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_adjust_bandwidth_lsp_all is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_adjust_bandwidth_lsp_all() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mpls_adjust_bandwidth_lsp_all.mpls_adjust_bandwidth_lsp_all, is_leaf=True, yang_name="mpls-adjust-bandwidth-lsp-all", rest_name="mpls-adjust-bandwidth-lsp-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsAdjustBandwidthAll'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_adjust_bandwidth_lsp_all must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=mpls_adjust_bandwidth_lsp_all.mpls_adjust_bandwidth_lsp_all, is_leaf=True, yang_name="mpls-adjust-bandwidth-lsp-all", rest_name="mpls-adjust-bandwidth-lsp-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsAdjustBandwidthAll'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__mpls_adjust_bandwidth_lsp_all = t
if hasattr(self, '_set'):
self._set() | 0.005892 |
def p_IndexTypes(self, p):
"""IndexTypes : IndexTypes ',' IndexType
| IndexType"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]] | 0.008696 |
def _dequeue_function(self):
""" Internal method to dequeue to events. """
from UcsBase import WriteUcsWarning, _GenericMO, WriteObject, UcsUtils
while len(self._wbs):
lowestTimeout = None
for wb in self._wbs:
pollSec = wb.params["pollSec"]
managedObject = wb.params["managedObject"]
timeoutSec = wb.params["timeoutSec"]
transientValue = wb.params["transientValue"]
successValue = wb.params["successValue"]
failureValue = wb.params["failureValue"]
prop = wb.params["prop"]
startTime = wb.params["startTime"]
gmo = None
pmo = None
mce = None
if (pollSec != None and managedObject != None):
crDn = self.ConfigResolveDn(managedObject.getattr("Dn"), inHierarchical=YesOrNo.FALSE,
dumpXml=YesOrNo.FALSE)
if (crDn.errorCode != 0):
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning(
'[Error]: WatchUcs [Code]:' + crDn.errorCode + ' [Description]:' + crDn.errorDescr)
continue
for eachMo in crDn.OutConfig.GetChild():
pmo = eachMo
if pmo == None:
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning('Mo ' + managedObject.getattr("Dn") + ' not found.')
continue
gmo = _GenericMO(mo=pmo, option=WriteXmlOption.All)
else:
ts = datetime.datetime.now() - startTime
timeoutMs = 0
if (timeoutSec != None):
if (ts.seconds >= timeoutSec): # TimeOut
self._remove_watch_block(wb)
continue
timeoutMs = (timeoutSec - ts.seconds)
if (lowestTimeout == None):
lowestTimeout = timeoutMs
else:
if (lowestTimeout > timeoutMs):
lowestTimeout = timeoutMs
if (timeoutMs > 0):
mce = wb.Dequeue(timeoutMs)
else:
mce = wb.Dequeue(2147483647)
if mce == None:
# break
continue
if (managedObject == None): # Means parameterset is not Mo
if wb.cb != None:
wb.cb(mce)
continue
if mce != None:
gmo = _GenericMO(mo=mce.mo, option=WriteXmlOption.All)
attributes = []
if mce == None:
attributes = gmo.properties.keys()
else:
attributes = mce.changeList
if prop.lower() in (attr.lower() for attr in attributes):
if (len(successValue) > 0 and gmo.GetAttribute(UcsUtils.WordU(prop)) in successValue):
if mce != None:
if wb.cb != None:
wb.cb(mce)
else:
if wb.cb != None:
wb.cb(UcsMoChangeEvent(eventId=0, mo=pmo, changeList=prop))
if wb != None:
self._remove_watch_block(wb)
wb = None
break
# return
continue
if (len(failureValue) > 0 and gmo.GetAttribute(UcsUtils.WordU(prop)) in failureValue):
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning('Encountered error value ' + gmo.GetAttribute(
UcsUtils.WordU(prop)) + ' for property ' + prop + '.')
if mce != None:
if wb.cb != None:
wb.cb(mce)
else:
if wb.cb != None:
wb.cb(UcsMoChangeEvent(eventId=0, mo=pmo, changeList=prop))
if wb != None:
self._remove_watch_block(wb) # TODO: implement removeStop call back
wb = None
break
continue
if ((len(transientValue) > 0) and (not gmo.GetAttribute(UcsUtils.WordU(prop)) in transientValue)):
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning('Encountered unknown value ' + gmo.GetAttribute(
UcsUtils.WordU(prop)) + ' for property ' + prop + '.')
if mce != None:
if wb.cb != None:
wb.cb(mce)
else:
if wb.cb != None:
wb.cb(UcsMoChangeEvent(eventId=0, mo=pmo, changeList=prop))
if wb != None:
self._remove_watch_block(wb) # TODO: implement removeStop call back
wb = None
break
continue
if (pollSec != None):
pollMs = pollSec
if (timeoutSec != None):
pts = datetime.datetime.now() - startTime
if (pts.seconds >= timeoutSec): # TimeOut
break
timeoutMs = (timeoutSec - pts.seconds)
if (timeoutMs < pollSec):
pollMs = pts.seconds
# time.sleep(pollMs)
if (lowestTimeout == None):
lowestTimeout = pollMs
else:
if (lowestTimeout > pollMs):
lowestTimeout = pollMs
if len(self._wbs):
self._dequeue_wait(lowestTimeout)
return | 0.036284 |
def dispose(self):
"""Disposes the :py:class:`securityhandlerhelper` object."""
self._username = None
self._password = None
self._org_url = None
self._proxy_url = None
self._proxy_port = None
self._token_url = None
self._securityHandler = None
self._valid = None
self._message = None
del self._username
del self._password
del self._org_url
del self._proxy_url
del self._proxy_port
del self._token_url
del self._securityHandler
del self._valid
del self._message | 0.003263 |
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter | 0.004024 |
def _resolve_assignment_parts(parts, assign_path, context):
"""recursive function to resolve multiple assignments"""
assign_path = assign_path[:]
index = assign_path.pop(0)
for part in parts:
assigned = None
if isinstance(part, nodes.Dict):
# A dictionary in an iterating context
try:
assigned, _ = part.items[index]
except IndexError:
return
elif hasattr(part, "getitem"):
index_node = nodes.Const(index)
try:
assigned = part.getitem(index_node, context)
except (exceptions.AstroidTypeError, exceptions.AstroidIndexError):
return
if not assigned:
return
if not assign_path:
            # we managed to resolve the assignment path; don't infer the
# last part
yield assigned
elif assigned is util.Uninferable:
return
else:
            # we are not yet on the last part of the path; search on each
            # possibly inferred value
try:
yield from _resolve_assignment_parts(
assigned.infer(context), assign_path, context
)
except exceptions.InferenceError:
return | 0.000759 |
def save_reg(data):
'''
Save the register to msgpack files
'''
reg_dir = _reg_dir()
regfile = os.path.join(reg_dir, 'register')
try:
if not os.path.exists(reg_dir):
os.makedirs(reg_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
try:
with salt.utils.files.fopen(regfile, 'a') as fh_:
salt.utils.msgpack.dump(data, fh_)
except Exception:
log.error('Could not write to msgpack file %s', __opts__['outdir'])
raise | 0.001757 |
def get_grid(start, end, nsteps=100):
"""
    Generates an equally spaced list of float values with nsteps+1 values, beginning with start and ending with end.
:param start: the start value of the generated list.
:type float
:param end: the end value of the generated list.
:type float
    :param nsteps: optional; the number of steps (default=100), i.e. the generated list contains nsteps+1 values.
:type int
"""
step = (end-start) / float(nsteps)
return [start + i * step for i in xrange(nsteps+1)] | 0.005607 |
def log_deferred(op, log_id, every_n=1, first_n=None):
"""Helper method inserting compliance logging ops.
Note: This helper is not guaranteed to be efficient, as it will insert ops
and control dependencies. If this proves to be a bottleneck, submitters
may wish to consider other methods such as extracting values from an
.events file.
Args:
op: A tf op to be printed.
log_id: a uuid provided by the logger in mlperf_log.py
    every_n: With what frequency the input op should be logged.
first_n: Only log this many values. This arg does not interact with every_n.
The first_n refers to the first n that would have been logged.
"""
prefix = ":::MLPv0.5.0 [{}]".format(log_id)
  if first_n is not None and first_n == 1:
return tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)
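  # persistent counter incremented on every call so the op is printed only
  # once every `every_n` steps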
counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
aggregation=tf.VariableAggregation.MEAN)
increment = tf.assign_add(counter, 1, use_locking=True)
return tf.cond(
tf.equal(tf.mod(increment, every_n), 0),
lambda :tf.Print(op, [tf.timestamp(), op], message=prefix,
first_n=first_n),
lambda :op
) | 0.009871 |
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap | 0.002309 |
def rename_index(self, old_name, new_name=None):
"""
Renames an index.
:param old_name: The name of the index to rename from.
:type old_name: str
:param new_name: The name of the index to rename to.
:type new_name: str or None
:rtype: Table
"""
old_name = self._normalize_identifier(old_name)
normalized_new_name = self._normalize_identifier(new_name)
if old_name == normalized_new_name:
return self
if not self.has_index(old_name):
raise IndexDoesNotExist(old_name, self._name)
if self.has_index(normalized_new_name):
raise IndexAlreadyExists(normalized_new_name, self._name)
old_index = self._indexes[old_name]
if old_index.is_primary():
self.drop_primary_key()
return self.set_primary_key(old_index.get_columns(), new_name)
del self._indexes[old_name]
if old_index.is_unique():
return self.add_unique_index(old_index.get_columns(), new_name)
return self.add_index(old_index.get_columns(), new_name, old_index.get_flags()) | 0.002604 |
def create(self, **kwargs):
"""
Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0]) | 0.004405 |
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python3
            async for message in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so server will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
message = await recv_message(self._stream, self._codec, self._recv_type)
message, = await self._dispatch.recv_message(message)
return message | 0.00291 |
def num_coll_reqd(DIM_FRACTAL, material, DiamTarget):
"""Return the number of doubling collisions required.
Calculates the number of doubling collisions required to produce
a floc of diameter DiamTarget.
"""
return DIM_FRACTAL * np.log2(DiamTarget/material.Diameter) | 0.003484 |
def description(self):
"""string or None if unknown"""
name = None
try:
name = self._TYPE_NAMES[self.audioObjectType]
except IndexError:
pass
if name is None:
return
if self.sbrPresentFlag == 1:
name += "+SBR"
if self.psPresentFlag == 1:
name += "+PS"
return text_type(name) | 0.005013 |
def get_settings_from_client(client):
"""Pull out settings from a SoftLayer.BaseClient instance.
:param client: SoftLayer.BaseClient instance
"""
settings = {
'username': '',
'api_key': '',
'timeout': '',
'endpoint_url': '',
}
try:
settings['username'] = client.auth.username
settings['api_key'] = client.auth.api_key
except AttributeError:
pass
transport = _resolve_transport(client.transport)
try:
settings['timeout'] = transport.timeout
settings['endpoint_url'] = transport.endpoint_url
except AttributeError:
pass
return settings | 0.001515 |
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
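            # with multiple groupings, zip the per-grouping labels into tuples
            # and group the axis by those composite keys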
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby) | 0.006098 |
def _wrap_definition_section(source, width):
# type: (str, int) -> str
"""Wrap the given definition section string to the current terminal size.
Note:
Auto-adjusts the spacing between terms and definitions.
Args:
source: The section string to wrap.
Returns:
The wrapped section string.
"""
index = source.index('\n') + 1
definitions, max_len = _get_definitions(source[index:])
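    # continuation lines are indented so wrapped descriptions stay aligned
    # under the description column (term width + 4 spaces)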
sep = '\n' + ' ' * (max_len + 4)
lines = [source[:index].strip()]
for arg, desc in six.iteritems(definitions):
wrapped_desc = sep.join(textwrap.wrap(desc, width - max_len - 4))
lines.append(' {arg:{size}} {desc}'.format(
arg=arg,
size=str(max_len),
desc=wrapped_desc
))
return '\n'.join(lines) | 0.001239 |
def setup(app):
"""Setup autodoc."""
# Fix for documenting models.FileField
from django.db.models.fields.files import FileDescriptor
FileDescriptor.__get__ = lambda self, *args, **kwargs: self
import django
django.setup()
app.connect('autodoc-skip-member', skip)
app.add_stylesheet('_static/custom.css') | 0.002985 |
def prune_hashes(self, hashes, list_type):
"""Prune any hashes not in source resource or change list."""
discarded = []
for hash in hashes:
if (hash in self.hashes):
self.hashes.discard(hash)
discarded.append(hash)
self.logger.info("Not calculating %s hash(es) on destination as not present "
"in source %s list" % (', '.join(sorted(discarded)), list_type)) | 0.008734 |
def setup(config):
"""
Setup the environment for an example run.
"""
formatter = config.Formatter()
if config.verbose:
formatter = result.Verbose(formatter)
if config.color:
formatter = result.Colored(formatter)
current_result = result.ExampleResult(formatter)
ivoire.current_result = ivoire._manager.result = current_result | 0.002653 |
def ExtractEventsFromSources(self):
"""Processes the sources and extracts events.
Raises:
BadConfigOption: if the storage file path is invalid or the storage
format not supported or an invalid filter was specified.
SourceScannerError: if the source scanner could not find a supported
file system.
UserAbort: if the user initiated an abort.
"""
self._CheckStorageFile(self._storage_file_path, warn_about_existing=True)
scan_context = self.ScanSource(self._source_path)
self._source_type = scan_context.source_type
self._status_view.SetMode(self._status_view_mode)
self._status_view.SetSourceInformation(
self._source_path, self._source_type,
artifact_filters=self._artifact_filters,
filter_file=self._filter_file)
status_update_callback = (
self._status_view.GetExtractionStatusUpdateCallback())
self._output_writer.Write('\n')
self._status_view.PrintExtractionStatusHeader(None)
self._output_writer.Write('Processing started.\n')
session = engine.BaseEngine.CreateSession(
artifact_filter_names=self._artifact_filters,
command_line_arguments=self._command_line_arguments,
debug_mode=self._debug_mode,
filter_file_path=self._filter_file,
preferred_encoding=self.preferred_encoding,
preferred_time_zone=self._preferred_time_zone,
preferred_year=self._preferred_year)
storage_writer = storage_factory.StorageFactory.CreateStorageWriter(
self._storage_format, session, self._storage_file_path)
if not storage_writer:
raise errors.BadConfigOption(
'Unsupported storage format: {0:s}'.format(self._storage_format))
single_process_mode = self._single_process_mode
if self._source_type == dfvfs_definitions.SOURCE_TYPE_FILE:
# No need to multi process a single file source.
single_process_mode = True
if single_process_mode:
extraction_engine = single_process_engine.SingleProcessEngine()
else:
extraction_engine = multi_process_engine.TaskMultiProcessEngine(
use_zeromq=self._use_zeromq)
# If the source is a directory or a storage media image
# run pre-processing.
if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
self._PreprocessSources(extraction_engine)
configuration = self._CreateProcessingConfiguration(
extraction_engine.knowledge_base)
self._SetExtractionParsersAndPlugins(configuration, session)
self._SetExtractionPreferredTimeZone(extraction_engine.knowledge_base)
try:
filter_find_specs = extraction_engine.BuildFilterFindSpecs(
self._artifact_definitions_path, self._custom_artifacts_path,
extraction_engine.knowledge_base, self._artifact_filters,
self._filter_file)
except errors.InvalidFilter as exception:
raise errors.BadConfigOption(
'Unable to build filter specification: {0!s}'.format(exception))
processing_status = None
if single_process_mode:
logger.debug('Starting extraction in single process mode.')
processing_status = extraction_engine.ProcessSources(
self._source_path_specs, storage_writer, self._resolver_context,
configuration, filter_find_specs=filter_find_specs,
status_update_callback=status_update_callback)
else:
logger.debug('Starting extraction in multi process mode.')
processing_status = extraction_engine.ProcessSources(
session.identifier, self._source_path_specs, storage_writer,
configuration, enable_sigsegv_handler=self._enable_sigsegv_handler,
filter_find_specs=filter_find_specs,
number_of_worker_processes=self._number_of_extraction_workers,
status_update_callback=status_update_callback,
worker_memory_limit=self._worker_memory_limit)
self._status_view.PrintExtractionSummary(processing_status) | 0.003288 |
def info(self):
"""Returns the name and version of the current shell"""
proc = Popen(['fish', '--version'],
stdout=PIPE, stderr=DEVNULL)
version = proc.stdout.read().decode('utf-8').split()[-1]
return u'Fish Shell {}'.format(version) | 0.006993 |
def set_pkg_verif_code(self, doc, code):
"""Sets the package verification code, if not already set.
code - A string.
Raises CardinalityError if already defined.
Raises OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_verif_set:
self.package_verif_set = True
doc.package.verif_code = code
else:
raise CardinalityError('Package::VerificationCode') | 0.004049 |
def warningFlag(self):
'''
When viewing individual event registrations, there are a large number of potential
issues that can arise that may warrant scrutiny. This property just checks all of
these conditions and indicates if anything is amiss so that the template need not
check each of these conditions individually repeatedly.
'''
if not hasattr(self,'invoice'):
return True
if apps.is_installed('danceschool.financial'):
'''
If the financial app is installed, then we can also check additional
properties set by that app to ensure that there are no inconsistencies
'''
if self.invoice.revenueNotYetReceived != 0 or self.invoice.revenueMismatch:
return True
return (
self.priceWithDiscount != self.invoice.total or
self.invoice.unpaid or self.invoice.outstandingBalance != 0
) | 0.009269 |
def publish_synchronous(self, *args, **kwargs):
'''
Helper for publishing a message using transactions. If 'cb' keyword
arg is supplied, will be called when the transaction is committed.
'''
cb = kwargs.pop('cb', None)
self.tx.select()
self.basic.publish(*args, **kwargs)
self.tx.commit(cb=cb) | 0.005587 |
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
if not file_entry or not file_entry.link:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: not a symbolic link'.format(
self.ARTIFACT_DEFINITION_NAME))
_, _, time_zone = file_entry.link.partition('zoneinfo/')
# TODO: check if time zone is set in knowledge base.
if time_zone:
try:
knowledge_base.SetTimeZone(time_zone)
except ValueError:
# TODO: add and store preprocessing errors.
pass | 0.004582 |
def get_rich_menu_image(self, rich_menu_id, timeout=None):
"""Call download rich menu image API.
https://developers.line.me/en/docs/messaging-api/reference/#download-rich-menu-image
:param str rich_menu_id: ID of the rich menu with the image to be downloaded
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Content`
:return: Content instance
"""
response = self._get(
'/v2/bot/richmenu/{rich_menu_id}/content'.format(rich_menu_id=rich_menu_id),
timeout=timeout
)
return Content(response) | 0.004624 |
def run(self):
"""
Returns the sum of unread messages across all registered backends
"""
unread = 0
current_unread = 0
for id, backend in enumerate(self.backends):
temp = backend.unread or 0
unread = unread + temp
if id == self.current_backend:
current_unread = temp
if not unread:
color = self.color
urgent = "false"
if self.hide_if_null:
self.output = None
return
else:
color = self.color_unread
urgent = "true"
format = self.format
if unread > 1:
format = self.format_plural
account_name = getattr(self.backends[self.current_backend], "account", "No name")
self.output = {
"full_text": format.format(unread=unread, current_unread=current_unread, account=account_name),
"urgent": urgent,
"color": color,
} | 0.003972 |
def hybrid_forward(self, F, scores, target_dists, finished, best_hyp_indices):
"""
Choose an extension of each hypothesis from its softmax distribution.
:param scores: Vocabulary scores for the next beam step. (batch_size * beam_size, target_vocabulary_size)
:param target_dists: The non-cumulative target distributions (ignored).
:param finished: The list of finished hypotheses.
:param best_hyp_indices: Best hypothesis indices constant.
:return: The row indices, column indices, and values of the sampled words.
"""
# Map the negative logprobs to probabilities so as to have a distribution
target_dists = F.exp(-target_dists)
# n == 0 means sample from the full vocabulary. Otherwise, we sample from the top n.
if self.n != 0:
# select the top n in each row, via a mask
masked_items = F.topk(target_dists, k=self.n, ret_typ='mask', axis=1, is_ascend=False)
# set unmasked items to 0
masked_items = F.where(masked_items, target_dists, masked_items)
# renormalize
target_dists = F.broadcast_div(masked_items, F.sum(masked_items, axis=1, keepdims=True))
# Sample from the target distributions over words, then get the corresponding values from the cumulative scores
best_word_indices = F.random.multinomial(target_dists, get_prob=False)
# Zeroes for finished hypotheses.
best_word_indices = F.where(finished, F.zeros_like(best_word_indices), best_word_indices)
values = F.pick(scores, best_word_indices, axis=1, keepdims=True)
best_hyp_indices = F.slice_like(best_hyp_indices, best_word_indices, axes=(0,))
return best_hyp_indices, best_word_indices, values | 0.006159 |
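# Minimal NumPy sketch of the top-n sampling step above (illustration only, not
# Sockeye code): keep the n highest-probability entries per row, zero out the rest,
# renormalize, then draw one index per row.
import numpy as np

def sample_top_n(neg_logprobs, n, rng=None):
    rng = rng or np.random.default_rng(0)
    probs = np.exp(-neg_logprobs)                       # map negative logprobs to probabilities
    if n > 0:
        cutoff = np.partition(probs, -n, axis=1)[:, -n][:, None]
        probs = np.where(probs >= cutoff, probs, 0.0)   # mask everything outside the top n
    probs = probs / probs.sum(axis=1, keepdims=True)    # renormalize each row
    return np.array([rng.choice(row.size, p=row) for row in probs])

print(sample_top_n(np.array([[0.1, 1.0, 2.0, 3.0]]), n=2))   # samples one of the two best entries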
def run_manager(self, job_override=None):
"""The run manager.
The run manager is responsible for loading the plugin required based on
what the user has inputted using the parsed_command value as found in
the job_args dict. If the user provides a *job_override* the method
will attempt to import the module and class as provided by the user.
Before the method attempts to run any job the run manager will first
        authenticate to the cloud provider.
        :param job_override: ``str`` DOT notation for the import, with a colon
                              used to separate the class used for the job.
"""
for arg_name, arg_value in self.job_args.items():
if arg_name.endswith('_headers'):
if isinstance(arg_value, list):
self.job_args[arg_name] = self._list_headers(
headers=arg_value
)
elif not arg_name:
self.job_args[arg_name] = self._str_headers(
header=arg_value
)
else:
self.job_args[arg_name] = dict()
# Set base header for the user-agent
self.job_args['base_headers']['User-Agent'] = 'turbolift'
LOG.info('Authenticating')
indicator_options = {'run': self.job_args.get('run_indicator', True)}
with indicator.Spinner(**indicator_options):
LOG.debug('Authenticate against the Service API')
self.job_args.update(auth.authenticate(job_args=self.job_args))
if job_override:
action = self._get_method(method=job_override)
else:
parsed_command = self.job_args.get('parsed_command')
if not parsed_command:
raise exceptions.NoCommandProvided(
'Please provide a command. Basic commands are: %s',
list(self.job_map.keys())
)
else:
action = self._get_method(method=self.job_map[parsed_command])
run = action(job_args=self.job_args)
run.start() | 0.000925 |
def vnic_attach_to_network_distributed(nicspec, port_group, logger):
"""
Attach vNIC to a Distributed Port Group network
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param port_group: <vim.dvs.DistributedVirtualPortgroup>
:param logger:
:return: updated 'nicspec'
"""
if nicspec and network_is_portgroup(port_group):
network_name = port_group.name
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = port_group.key
dvs_port_connection.switchUuid = port_group.config.distributedVirtualSwitch.uuid
nicspec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nicspec.device.backing.port = dvs_port_connection
logger.debug(u"Assigning portgroup '{}' for vNIC".format(network_name))
else:
logger.warn(u"Cannot assigning portgroup for vNIC")
return nicspec | 0.005 |
def set_logger(self, logger_name, level=logging.INFO):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
log = logging.getLogger(logger_name)
log.setLevel(level)
ch = logging.StreamHandler(None)
ch.setLevel(level)
# create formatter
if level == logging.INFO:
formatter = logging.Formatter(InfoFmtString)
else:
formatter = logging.Formatter(DebugFmtString)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch) | 0.003175 |
def parse_ontology(self, fn, flatten=True, part_of_cc_only=False):
""" Parse an OBO file and store GO term information.
This function needs to be called before `parse_annotations`, in order
to read in the Gene Ontology terms and structure.
Parameters
----------
fn: str
Path of the OBO file.
flatten: bool, optional
If set to False, do not generate a list of all ancestors and
descendants for each GO term. Warning: Without flattining,
GOparser cannot propagate GO annotations properly.
part_of_cc_only: bool, optional
Legacy parameter for backwards compatibility. If set to True,
ignore ``part_of`` relations outside the ``celluclar_component``
domain.
Notes
-----
The function erases all previously parsed data.
The function requires the OBO file to end with a line break.
"""
self.clear_data() # clear all old data
with open(fn) as fh:
n = 0
while True:
try:
nextline = next(fh)
except StopIteration:
break
if nextline == '[Term]\n':
n += 1
id_ = next(fh)[4:-1]
# acc = get_acc(id_)
name = next(fh)[6:-1]
self._name2id[name] = id_
domain = next(fh)[11:-1]
def_ = None
is_a = set()
part_of = set()
l = next(fh)
while l != '\n':
if l.startswith('alt_id:'):
self._alt_id[l[8:-1]] = id_
elif l.startswith('def: '):
idx = l[6:].index('"')
def_ = l[6:(idx+6)]
elif l.startswith('is_a:'):
is_a.add(l[6:16])
elif l.startswith('synonym:'):
idx = l[10:].index('"')
if l[(10+idx+2):].startswith("EXACT"):
s = l[10:(10+idx)]
self._syn2id[s] = id_
elif l.startswith('relationship: part_of'):
if part_of_cc_only:
if domain == 'cellular_component':
part_of.add(l[22:32])
else:
part_of.add(l[22:32])
l = next(fh)
assert def_ is not None
self.terms[id_] = GOTerm(id_, name, domain,
def_, is_a, part_of)
logger.info('Parsed %d GO term definitions.', n)
# store children and parts
logger.info('Adding child and part relationships...')
for id_, term in self.terms.items():
for parent in term.is_a:
self.terms[parent].children.add(id_)
for whole in term.part_of:
self.terms[whole].parts.add(id_)
if flatten:
logger.info('Flattening ancestors...')
self._flatten_ancestors()
logger.info('Flattening descendants...')
self._flatten_descendants()
self._flattened = True | 0.001444 |
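# Hedged usage sketch (not from the original source): the parser class is assumed to
# be importable as GOParser from the goparser package, and 'go-basic.obo' is a
# placeholder path to a locally downloaded Gene Ontology OBO file.
from goparser import GOParser

parser = GOParser()
parser.parse_ontology('go-basic.obo', flatten=True)
print('%d GO terms parsed' % len(parser.terms))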
def find_loops(self, _path=None):
"""Crappy function that finds a single loop in the tree"""
if _path is None:
_path = []
if self in _path:
return _path + [self]
elif self._children == []:
return None
else:
for child in self._children:
return child.find_loops(_path + [self]) | 0.005249 |
def dependencyOrder(aMap, aList = None):
"""
Given descriptions of dependencies in aMap and an optional list of items in aList
if not aList, aList = aMap.keys()
Returns a list containing each element of aList and all its precursors so that every precursor of
any element in the returned list is seen before that dependent element.
    If aMap contains cycles, the resulting order is undefined.
"""
dependencyMap = makeDependencyMap(aMap)
outputList = []
if not aList:
aList = aMap.keys()
items = []
v = BottomUpVisitor()
for item in aList:
try:
v.visit(dependencyMap[item])
except KeyError:
outputList.append(item)
outputList = [x.item for x in v.history]+outputList
return outputList | 0.021419 |
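# Illustrative call (not from the original source), assuming aMap maps each item to
# the list of items it depends on; precursors then appear before their dependents.
deps = {
    'deploy': ['build', 'test'],
    'test': ['build'],
    'build': [],
}
print(dependencyOrder(deps))   # e.g. ['build', 'test', 'deploy'] (exact order may vary)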
def process_fields(self, field_names, depth):
'''
The primary purpose of this function is to store the sql field list
and the depth to which we process.
'''
# List of field names in correct order.
self.field_names = field_names
# number of fields.
self.num_fields = len(field_names)
# Constrain depth.
if (depth == 0) or (depth >= self.num_fields):
self.depth = self.num_fields - 1
else:
self.depth = depth | 0.003861 |
def equation(self):
"""Mix-in class that returns matrix rows for difference in head between inside and
outside equals zeros
Returns matrix part (nunknowns,neq)
Returns rhs part nunknowns
"""
mat = np.empty((self.nunknowns, self.model.neq))
rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero
for icp in range(self.ncp):
istart = icp * self.nlayers
ieq = 0
for e in self.model.elementlist:
if e.nunknowns > 0:
qxin, qyin = e.disvecinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin)
qxout, qyout = e.disvecinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout)
mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \
(qxin - qxout) * self.cosnorm[icp] + (qyin - qyout) * self.sinnorm[icp]
ieq += e.nunknowns
else:
qxin, qyin = e.disveclayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin)
qxout, qyout = e.disveclayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout)
rhs[istart:istart + self.nlayers] -= (qxin - qxout) * self.cosnorm[icp] + (qyin - qyout) * \
self.sinnorm[icp]
return mat, rhs | 0.007682 |
def load_config(self, path, environments, fill_with_defaults=False):
"""Will load default.yaml and <environment>.yaml at given path.
The environment config will override the default values.
:param path: directory where to find your config files. If the last character is not a slash (/) it will be appended. Example: resources/
:param environments: list of environment configs to load. File name pattern: <environment>.yaml. Example: develop.yaml. Latter configs will override previous ones.
        :param fill_with_defaults: use the 'defaults' keyword in the config file to fill in the following config entries.
:return: your config as dictionary.
"""
yaml.add_implicit_resolver("!environ", self.__environ_pattern)
yaml.add_constructor('!environ', self.__get_from_environment)
yaml.add_implicit_resolver("!vault", self.__vault_pattern)
yaml.add_constructor('!vault', self.__get_from_vault)
if not path.endswith('/'):
path += '/'
if type(environments) != list:
environments = [environments]
config = {}
try:
for env in environments:
with open(path + env + '.yaml', 'r') as configFile:
env_config = yaml.load(configFile.read()) or {}
config.update(env_config)
if fill_with_defaults:
if 'defaults' in config:
defaults = config['defaults']
for target in defaults:
for index, item in enumerate(config[target]):
tmp = defaults[target].copy()
tmp.update(config[target][index])
config[target][index] = tmp
return config
except exceptions.VaultError as error:
raise ConfigLoaderError("Could not read vault secrets [" + error.__class__.__name__ + "]")
except yaml.YAMLError as error:
raise ConfigLoaderError("Configuration files malformed [" + error.__class__.__name__ + "]")
except json.decoder.JSONDecodeError as error:
raise ConfigLoaderError("Vault response was not json [" + error.__class__.__name__ + "]")
except Exception as error:
raise ConfigLoaderError("WTF? [" + error.__class__.__name__ + "]") | 0.003396 |
def _ssid_inventory(self, inventory, ssid):
"""
Filters an inventory to only return servers matching ssid
"""
matching_hosts = {}
for host in inventory:
if inventory[host]['comment'] == ssid:
matching_hosts[host] = inventory[host]
return matching_hosts | 0.006079 |
def is_callable(self):
"""
Ensures :attr:`subject` is a callable.
"""
if not callable(self._subject):
raise self._error_factory(_format("Expected {} to be callable", self._subject)) | 0.013333 |
def restore_backup(self, backup, name, flavor, volume):
"""
Restores a backup to a new database instance. You must supply a backup
(either the ID or a CloudDatabaseBackup object), a name for the new
instance, as well as a flavor and size (in GB) for the instance.
"""
return self._manager.restore_backup(backup, name, flavor, volume) | 0.005249 |
def intensityDistributionSTE(self, bins=10, range=None):
'''
return distribution of STE intensity
'''
v = np.abs(self._last_diff[self.mask_STE])
return np.histogram(v, bins, range) | 0.008889 |
def reserve_file(self, relative_path):
"""reserve a XML file for the slice at <relative_path>.xml
- the relative path will be created for you
- not writing anything to that file is an error
"""
if os.path.isabs(relative_path):
raise ValueError('%s must be a relative path' % relative_path)
dest_path = os.path.join(self.root_dir, '%s.xml' % relative_path)
if os.path.exists(dest_path):
raise ValueError('%r must not already exist' % dest_path)
if dest_path in self.expected_xunit_files:
raise ValueError('%r already reserved' % dest_path)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
self.expected_xunit_files.append(dest_path)
return dest_path | 0.002375 |
def _appendstore(self, store):
"""Join another store on to the end of this one."""
if not store.bitlength:
return
# Set new array offset to the number of bits in the final byte of current array.
store = offsetcopy(store, (self.offset + self.bitlength) % 8)
if store.offset:
# first do the byte with the join.
joinval = (self._rawarray.pop() & (255 ^ (255 >> store.offset)) |
(store.getbyte(0) & (255 >> store.offset)))
self._rawarray.append(joinval)
self._rawarray.extend(store._rawarray[1:])
else:
self._rawarray.extend(store._rawarray)
self.bitlength += store.bitlength | 0.004155 |
def block_create(
self,
type,
account,
wallet=None,
representative=None,
key=None,
destination=None,
amount=None,
balance=None,
previous=None,
source=None,
work=None,
):
"""
Creates a json representations of new block based on input data &
signed with private key or account in **wallet** for offline signing
.. enable_control required
.. version 8.1 required
:param type: Type of block to create one of **open**, **receive**,
**change**, **send**
:type type: str
:param account: Account for the signed block
:type account: str
:param wallet: Wallet to use
:type wallet: str
:param representative: Representative account for **open** and
**change** blocks
:type representative: str
:param key: Private key to use to open account for **open** blocks
:type key: str
:param destination: Destination account for **send** blocks
:type destination: str
:param amount: Amount in raw for **send** blocks
:type amount: int
:param balance: Balance in raw of account for **send** blocks
:type balance: int
:param previous: Previous block hash for **receive**, **send**
and **change** blocks
:type previous: str
:param source: Source block for **open** and **receive** blocks
:type source: str
:param work: Work value to use for block from external source
:type work: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.block_create(
... type="open",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
... representative="xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1",
... key="0000000000000000000000000000000000000000000000000000000000000001"
... )
{
"block": {
"account": "xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
"representative": "xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1",
"signature": "5974324F8CC42DA56F62FC212A17886BDCB18DE363D04DA84EEDC99CB4A33919D14A2CF9DE9D534FAA6D0B91D01F0622205D898293525E692586C84F2DCF9208",
"source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
"type": "open",
"work": "4ec76c9bda2325ed"
},
"hash": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4"
}
>>> rpc.block_create(
... type="receive",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... previous="F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4",
... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... )
{
"block": {
"previous": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4",
"signature": "A13FD22527771667D5DFF33D69787D734836A3561D8A490C1F4917A05D77EA09860461D5FBFC99246A4EAB5627F119AD477598E22EE021C4711FACF4F3C80D0E",
"source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858",
"type": "receive",
"work": "6acb5dd43a38d76a"
},
"hash": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E"
}
>>> rpc.block_create(
... type="send",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... amount=10000000000000000000000000000000,
... balance=20000000000000000000000000000000,
... destination="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
... previous="314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E",
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... work="478563b2d9facfd4",
... )
{
"block": {
"balance": "0000007E37BE2022C0914B2680000000",
"destination": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
"previous": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E",
"signature": "F19CA177EFA8692C8CBF7478CE3213F56E4A85DF760DA7A9E69141849831F8FD79BA9ED89CEC807B690FB4AA42D5008F9DBA7115E63C935401F1F0EFA547BC00",
"type": "send",
"work": "478563b2d9facfd4"
},
"hash": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A"
}
>>> rpc.block_create(
... type="change",
... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951",
... representative="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
... previous="F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A",
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... )
{
"block": {
"previous": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A",
"representative": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc",
"signature": "98B4D56881D9A88B170A6B2976AE21900C26A27F0E2C338D93FDED56183B73D19AA5BEB48E43FCBB8FF8293FDD368CEF50600FECEFD490A0855ED702ED209E04",
"type": "change",
"work": "55e5b7a83edc3f4f"
},
"hash": "654FA425CEBFC9E7726089E4EDE7A105462D93DBC915FFB70B50909920A7D286"
}
"""
payload = {
"type": self._process_value(type, 'blocktype'),
"account": self._process_value(account, 'account'),
}
if representative is not None:
payload['representative'] = self._process_value(representative, 'account')
if key is not None:
payload['key'] = self._process_value(key, 'privatekey')
if source is not None:
payload['source'] = self._process_value(source, 'block')
if destination is not None:
payload['destination'] = self._process_value(destination, 'account')
if amount is not None:
payload['amount'] = self._process_value(amount, 'int')
if balance is not None:
payload['balance'] = self._process_value(balance, 'int')
if previous is not None:
payload['previous'] = self._process_value(previous, 'block')
if wallet is not None:
payload['wallet'] = self._process_value(wallet, 'wallet')
if work is not None:
payload['work'] = self._process_value(work, 'work')
resp = self.call('block_create', payload)
resp['block'] = json.loads(resp['block'])
return resp | 0.005167 |
def to_pycode(self):
"""Create a python code object from the more abstract
codetransfomer.Code object.
Returns
-------
co : CodeType
The python code object.
"""
consts = self.consts
names = self.names
varnames = self.varnames
freevars = self.freevars
cellvars = self.cellvars
bc = bytearray()
for instr in self.instrs:
bc.append(instr.opcode) # Write the opcode byte.
if isinstance(instr, LOAD_CONST):
# Resolve the constant index.
bc.extend(consts.index(instr.arg).to_bytes(argsize, 'little'))
elif instr.uses_name:
# Resolve the name index.
bc.extend(names.index(instr.arg).to_bytes(argsize, 'little'))
elif instr.uses_varname:
# Resolve the local variable index.
bc.extend(
varnames.index(instr.arg).to_bytes(argsize, 'little'),
)
elif instr.uses_free:
# uses_free is really "uses freevars **or** cellvars".
try:
# look for the name in cellvars
bc.extend(
cellvars.index(instr.arg).to_bytes(argsize, 'little'),
)
except ValueError:
# fall back to freevars, incrementing the length of
# cellvars.
bc.extend(
(freevars.index(instr.arg) + len(cellvars)).to_bytes(
argsize,
'little',
)
)
elif instr.absjmp:
# Resolve the absolute jump target.
bc.extend(
self.bytecode_offset(instr.arg).to_bytes(
argsize,
'little',
),
)
elif instr.reljmp:
# Resolve the relative jump target.
                # We do this by subtracting the current instruction's
# sparse index from the sparse index of the argument.
# We then subtract argsize - 1 to account for the bytes the
# current instruction takes up.
bytecode_offset = self.bytecode_offset
bc.extend((
bytecode_offset(instr.arg) -
bytecode_offset(instr) -
argsize -
1
).to_bytes(argsize, 'little',))
elif instr.have_arg:
# Write any other arg here.
bc.extend(instr.arg.to_bytes(argsize, 'little'))
elif WORDCODE:
# with wordcode, all instructions are padded to 2 bytes
bc.append(0)
return CodeType(
self.argcount,
self.kwonlyargcount,
len(varnames),
self.stacksize,
self.py_flags,
bytes(bc),
consts,
names,
varnames,
self.filename,
self.name,
self.firstlineno,
self.py_lnotab,
freevars,
cellvars,
) | 0.000606 |
def render_workflow_html_template(filename, subtemplate, filelists, **kwargs):
""" Writes a template given inputs from the workflow generator. Takes
    a list of lists of pycbc File objects. Also takes the name of the
subtemplate to render and the filename of the output.
"""
dirnam = os.path.dirname(filename)
makedir(dirnam)
try:
filenames = [f.name for filelist in filelists for f in filelist if f is not None]
except TypeError:
filenames = []
# render subtemplate
subtemplate_dir = pycbc.results.__path__[0] + '/templates/wells'
env = Environment(loader=FileSystemLoader(subtemplate_dir))
env.globals.update(get_embedded_config=get_embedded_config)
env.globals.update(path_exists=os.path.exists)
env.globals.update(len=len)
subtemplate = env.get_template(subtemplate)
context = {'filelists' : filelists,
'dir' : dirnam}
context.update(kwargs)
output = subtemplate.render(context)
# save as html page
kwds = {'render-function' : 'render_tmplt',
'filenames' : ','.join(filenames)}
save_html_with_metadata(str(output), filename, None, kwds) | 0.005098 |
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get(b'Content-Length', 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the '
'maximum allowed bytes.',
)
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
self.ready and self.ensure_headers_sent()
if self.chunked_write:
self.conn.wfile.write(b'0\r\n\r\n') | 0.002281 |
async def update_notifications(self, on_match_open: bool = None, on_tournament_end: bool = None):
""" update participants notifications for this tournament
|methcoro|
Args:
on_match_open: Email registered Challonge participants when matches open up for them
on_tournament_end: Email registered Challonge participants the results when this tournament ends
Raises:
APIException
"""
params = {}
if on_match_open is not None:
params['notify_users_when_matches_open'] = on_match_open
if on_tournament_end is not None:
params['notify_users_when_the_tournament_ends'] = on_tournament_end
assert_or_raise(len(params) > 0, ValueError, 'At least one of the notifications must be given')
await self.update(**params) | 0.007075 |
def show_message(self, c_attack, c_defend, result, dmg, print_console='Yes'):
"""
function to wrap the display of the battle messages
"""
perc_health_att = '[' + str(round((c_attack.stats['Health']*100) / c_attack.stats['max_health'] )) + '%]'
perc_health_def = '[' + str(round((c_defend.stats['Health']*100) / c_defend.stats['max_health'] )) + '%]'
if result == 'Miss':
txt = c_attack.name + ' ' + perc_health_att.rjust(6) + ' miss ' + c_defend.name + ' ' + perc_health_def.rjust(6)
elif result == 'Crit':
txt = c_attack.name + ' ' + perc_health_att.rjust(6) + ' CRIT ' + c_defend.name + ' ' + perc_health_def.rjust(6)
txt += ' for ' + str(dmg)
else:
txt = c_attack.name + ' ' + perc_health_att.rjust(6) + ' hits ' + c_defend.name + ' ' + perc_health_def.rjust(6)
txt += ' for ' + str(dmg)
if print_console == 'Yes':
print(txt) | 0.015152 |
def calculate_input(self, buffer):
"""
Calculate how many keystrokes were used in triggering this folder (if applicable).
"""
if TriggerMode.ABBREVIATION in self.modes and self.backspace:
if self._should_trigger_abbreviation(buffer):
if self.immediate:
return len(self._get_trigger_abbreviation(buffer))
else:
return len(self._get_trigger_abbreviation(buffer)) + 1
if self.parent is not None:
return self.parent.calculate_input(buffer)
return 0 | 0.005085 |
def experiment(self, key, values):
"""Populate the ``experiment`` key.
Also populates the ``legacy_name``, the ``accelerator``, and the
``institutions`` keys through side effects.
"""
experiment = self.get('experiment', {})
legacy_name = self.get('legacy_name', '')
accelerator = self.get('accelerator', {})
institutions = self.get('institutions', [])
for value in force_list(values):
if value.get('c'):
experiment['value'] = value.get('c')
if value.get('d'):
experiment['short_name'] = value.get('d')
if value.get('a'):
legacy_name = value.get('a')
if value.get('b'):
accelerator['value'] = value.get('b')
institution = {}
if value.get('u'):
institution['value'] = value.get('u')
if value.get('z'):
record = get_record_ref(maybe_int(value.get('z')), 'institutions')
if record:
institution['curated_relation'] = True
institution['record'] = record
institutions.append(institution)
self['legacy_name'] = legacy_name
self['accelerator'] = accelerator
self['institutions'] = institutions
return experiment | 0.000806 |
def get_task_list(self, since='', task_types='', task_status=''):
"""
invokes TouchWorksMagicConstants.ACTION_GET_TASK_LIST action
:param since - If given a datetime, retrieves only tasks created (or last modified)
after that date and time. Defaults to 1/1/1900.
:param task_status - Optional list of pipe-delimited task status names.
For example, "Active|In Progress|Complete".
:param task_types - Optional list of pipe-delimited task type names.
For example, "Sign Note|Verify Result|MedRenewal"
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_TASK_LIST,
parameter1=since,
parameter2=task_types,
parameter3=task_status)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT)
return result | 0.003636 |
def compilers(self):
"""The list of compilers used to build asset."""
return [self.environment.compilers.get(e) for e in self.compiler_extensions] | 0.018519 |
def safe_remove_file(filename, app):
"""
Removes a given resource file from builder resources.
    Needed mostly during tests, if multiple sphinx-build runs are started.
    During these tests js/css files are not cleaned, so a css_file from run A is still registered in run B.
:param filename: filename to remove
:param app: app object
:return: None
"""
data_file = filename
static_data_file = os.path.join("_static", data_file)
if data_file.split(".")[-1] == "js":
if hasattr(app.builder, "script_files") and static_data_file in app.builder.script_files:
app.builder.script_files.remove(static_data_file)
elif data_file.split(".")[-1] == "css":
if hasattr(app.builder, "css_files") and static_data_file in app.builder.css_files:
app.builder.css_files.remove(static_data_file) | 0.004684 |
def vinet_v_single(p, v0, k0, k0p, min_strain=0.01):
"""
find volume at given pressure using brenth in scipy.optimize
this is for single p value, not vectorized
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3
"""
if p <= 1.e-5:
return v0
def f_diff(v, v0, k0, k0p, p):
return vinet_p(v, v0, k0, k0p) - p
v = brenth(f_diff, v0, v0 * min_strain, args=(v0, k0, k0p, p))
return v | 0.00141 |
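# Worked example (not from the original source): unit-cell volume of a hypothetical
# solid with v0 = 162.4 A^3, k0 = 160 GPa, k0p = 4.0 at 10 GPa, via the Vinet EOS
# inversion above (vinet_p and brenth are assumed importable from the same module).
v_high_p = vinet_v_single(10.0, 162.4, 160.0, 4.0)
print('V at 10 GPa: %.2f A^3' % v_high_p)   # somewhat smaller than v0, as expected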
def chebi(name=None, identifier=None) -> Abundance:
"""Build a ChEBI abundance node."""
return Abundance(namespace='CHEBI', name=name, identifier=identifier) | 0.006061 |
def download_file(p_realm, p_url, p_op_file, p_username, p_password):
"""
Currently not working...
# https://docs.python.org/3/library/urllib.request.html#examples
# Create an OpenerDirector with support for Basic HTTP Authentication...
"""
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm=p_realm,
uri=p_url,
user=p_username,
passwd=p_password)
opener = urllib.request.build_opener(auth_handler)
# ...and install it globally so it can be used with urlopen.
urllib.request.install_opener(opener)
web = urllib.request.urlopen(p_url)
with open(p_op_file, 'w') as f:
f.write(web.read().decode('utf-8')) | 0.001279 |
def to_df(figure):
"""
Extracts the data from a Plotly Figure
Parameters
----------
figure : plotly_figure
Figure from which data will be
extracted
Returns a DataFrame or list of DataFrame
"""
dfs=[]
for trace in figure['data']:
if 'scatter' in trace['type']:
try:
if type(trace['x'][0])==float:
index=trace['x']
else:
index=pd.to_datetime(trace['x'])
except:
index=trace['x']
if 'marker' in trace:
d={}
if 'size' in trace['marker']:
size=trace['marker']['size']
if type(size)!=list:
size=[size]*len(index)
d['size']=size
if 'text' in trace:
d['text']=trace['text']
if 'name' in trace:
name=trace['name']
if type(name)!=list:
name=[name]*len(index)
d['categories']=name
d['y']=trace['y']
d['x']=trace['x']
if 'z' in trace:
d['z']=trace['z']
df=pd.DataFrame(d)
else:
df=pd.Series(trace['y'],index=index,name=trace['name'])
dfs.append(df)
elif trace['type'] in ('heatmap','surface'):
df=pd.DataFrame(trace['z'].transpose(),index=trace['x'],columns=trace['y'])
dfs.append(df)
elif trace['type'] in ('box','histogram'):
vals=trace['x'] if 'x' in trace else trace['y']
df=pd.DataFrame({trace['name']:vals})
dfs.append(df)
if max(list(map(len,dfs)))==min(list(map(len,dfs))):
if len(dfs)==1:
return dfs[0]
else:
if type(dfs[0])==pd.core.series.Series:
return pd.concat(dfs,axis=1)
if all(dfs[0].columns==dfs[1].columns):
return pd.concat(dfs,axis=0)
else:
return pd.concat(dfs,axis=1)
else:
try:
return pd.concat(dfs)
except:
return dfs | 0.020678 |
def dcos_version():
"""Return the version of the running cluster.
:return: DC/OS cluster version as a string
"""
url = _gen_url('dcos-metadata/dcos-version.json')
response = dcos.http.request('get', url)
if response.status_code == 200:
return response.json()['version']
else:
return None | 0.003012 |
def create_from_binary(cls, ignore_signature_check, binary_view):
'''Creates a new object MFTHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
            MFTHeader: New object using the binary stream as source
'''
sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \
usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \
cls._REPR.unpack(binary_view[:cls._REPR.size])
baad = None
if not ignore_signature_check:
if sig == b"FILE":
baad = False
elif sig == b"BAAD":
baad = True
else:
raise HeaderError("Entry has no valid signature.", "MFTHeader")
if fx_offset < MFTHeader._REPR.size: #header[1] is fx_offset
raise HeaderError("Fix up array begins within the header.", "MFTHeader")
if first_attr_offset < cls._REPR.size: #first attribute offset < header size
raise HeaderError("First attribute offset points to inside of the header.", "MFTHeader")
if entry_len > alloc_len: #entry_len > entry_alloc_len
raise HeaderError("Logical size of the MFT is bigger than MFT allocated size.", "MFTHeader")
file_ref, file_seq = get_file_reference(base_record)
nw_obj = cls((baad, fx_offset, fx_count, lsn, seq_number, hard_link_count,
first_attr_offset, MftUsageFlags(usage_flags), entry_len, alloc_len,
file_ref, file_seq, next_attr_id, record_n))
return nw_obj | 0.011086 |
def vol(self, gain, gain_type='amplitude', limiter_gain=None):
'''Apply an amplification or an attenuation to the audio signal.
Parameters
----------
gain : float
Interpreted according to the given `gain_type`.
If `gain_type' = 'amplitude', `gain' is a positive amplitude ratio.
If `gain_type' = 'power', `gain' is a power (voltage squared).
If `gain_type' = 'db', `gain' is in decibels.
gain_type : string, default='amplitude'
Type of gain. One of:
- 'amplitude'
- 'power'
- 'db'
limiter_gain : float or None, default=None
If specified, a limiter is invoked on peaks greater than
`limiter_gain' to prevent clipping.
`limiter_gain` should be a positive value much less than 1.
See Also
--------
gain, compand
'''
if not is_number(gain):
raise ValueError('gain must be a number.')
if limiter_gain is not None:
if (not is_number(limiter_gain) or
limiter_gain <= 0 or limiter_gain >= 1):
raise ValueError(
'limiter gain must be a positive number less than 1'
)
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError(
"If gain_type = amplitude or power, gain must be positive."
)
effect_args = ['vol']
effect_args.append('{:f}'.format(gain))
if gain_type == 'amplitude':
effect_args.append('amplitude')
elif gain_type == 'power':
effect_args.append('power')
elif gain_type == 'db':
effect_args.append('dB')
else:
raise ValueError('gain_type must be one of amplitude power or db')
if limiter_gain is not None:
if gain_type in ['amplitude', 'power'] and gain > 1:
effect_args.append('{:f}'.format(limiter_gain))
elif gain_type == 'db' and gain > 0:
effect_args.append('{:f}'.format(limiter_gain))
self.effects.extend(effect_args)
self.effects_log.append('vol')
return self | 0.000886 |
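# Hedged usage sketch (not from the original source): the method above appears to
# belong to pysox's Transformer; this attenuates a file by 6 dB. File names are
# placeholders.
import sox

tfm = sox.Transformer()
tfm.vol(-6.0, gain_type='db')
tfm.build('input.wav', 'quieter.wav')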
def filter(self, filter):
"""Filter entries by calling function or applying regex."""
if hasattr(filter, '__call__'):
return [entry for entry in self.entries if filter(entry)]
else:
pattern = re.compile(filter, re.IGNORECASE)
return [entry for entry in self.entries if pattern.match(entry)] | 0.005714 |
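# Hedged, self-contained sketch of how the filter method above behaves (not from the
# original source); Entries is a minimal stand-in for the owning class, which only
# needs an `entries` list of strings plus the method defined above.
import re

class Entries:
    def __init__(self, entries):
        self.entries = entries
    def filter(self, filter):
        if hasattr(filter, '__call__'):
            return [entry for entry in self.entries if filter(entry)]
        pattern = re.compile(filter, re.IGNORECASE)
        return [entry for entry in self.entries if pattern.match(entry)]

store = Entries(['web/github', 'web/gitlab', 'mail/work'])
print(store.filter('^web/'))                  # ['web/github', 'web/gitlab']
print(store.filter(lambda e: 'mail' in e))    # ['mail/work']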
def get_compressed_filename(self, filename):
"""If the given filename should be compressed, returns the
compressed filename.
A file can be compressed if:
- It is a whitelisted extension
- The compressed file does not exist
- The compressed file exists by is older than the file itself
Otherwise, it returns False.
"""
if not os.path.splitext(filename)[1][1:] in self.suffixes_to_compress:
return False
file_stats = None
compressed_stats = None
compressed_filename = '{}.{}'.format(filename, self.suffix)
try:
file_stats = os.stat(filename)
compressed_stats = os.stat(compressed_filename)
except OSError: # FileNotFoundError is for Python3 only
pass
if file_stats and compressed_stats:
return (compressed_filename
if file_stats.st_mtime > compressed_stats.st_mtime
else False)
else:
return compressed_filename | 0.001896 |
def create_key_file(service, key):
"""Create a file containing key."""
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('Created new keyfile at %s.' % keyfile, level=INFO) | 0.00295 |
def front(self, *fields):
'''Return the front pair of the structure'''
ts = self.irange(0, 0, fields=fields)
if ts:
return ts.start(), ts[0] | 0.011111 |
def memoize(f):
"""Cache value returned by the function."""
@wraps(f)
def w(*args, **kw):
memoize.mem[f] = v = f(*args, **kw)
return v
return w | 0.005714 |
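# Usage sketch (not from the original source); assumes `from functools import wraps`
# is already imported for the decorator above. Note that, as written, the decorator
# records the most recent return value in memoize.mem[f] but still re-runs f on every
# call, so memoize.mem must be initialised before use.
memoize.mem = {}

@memoize
def answer():
    return 42

answer()
print(memoize.mem)   # {<function answer at 0x...>: 42}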
def altaz(self, temperature_C=None, pressure_mbar='standard'):
"""Compute (alt, az, distance) relative to the observer's horizon
The altitude returned is an `Angle` in degrees above the
horizon, while the azimuth is the compass direction in degrees
with north being 0 degrees and east being 90 degrees.
"""
return _to_altaz(self.position.au, self.observer_data,
temperature_C, pressure_mbar) | 0.004292 |
def _track_class_field(cls, field):
""" Track a field on the current model """
if '__' in field:
_track_class_related_field(cls, field)
return
# Will raise FieldDoesNotExist if there is an error
cls._meta.get_field(field)
# Detect m2m fields changes
if isinstance(cls._meta.get_field(field), ManyToManyField):
m2m_changed.connect(
tracking_m2m,
sender=getattr(cls, field).through,
dispatch_uid=repr(cls),
) | 0.002008 |
def make_workspace(measurement, channel=None, name=None, silence=False):
"""
Create a workspace containing the model for a measurement
If `channel` is None then include all channels in the model
If `silence` is True, then silence HistFactory's output on
stdout and stderr.
"""
context = silence_sout_serr if silence else do_nothing
with context():
hist2workspace = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast(
measurement)
if channel is not None:
workspace = hist2workspace.MakeSingleChannelModel(
measurement, channel)
else:
workspace = hist2workspace.MakeCombinedModel(measurement)
workspace = asrootpy(workspace)
keepalive(workspace, measurement)
if name is not None:
workspace.SetName('workspace_{0}'.format(name))
return workspace | 0.001138 |
def dispatch_to_extension_op(op, left, right):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
"""
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
# unbox Series and Index to arrays
if isinstance(left, (ABCSeries, ABCIndexClass)):
new_left = left._values
else:
new_left = left
if isinstance(right, (ABCSeries, ABCIndexClass)):
new_right = right._values
else:
new_right = right
res_values = op(new_left, new_right)
res_name = get_op_result_name(left, right)
if op.__name__ in ['divmod', 'rdivmod']:
return _construct_divmod_result(
left, res_values, left.index, res_name)
return _construct_result(left, res_values, left.index, res_name) | 0.001181 |