text (stringlengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def set_command_result(result, unpicklable=False):
"""
Serializes output as JSON and writes it to console output wrapped with special prefix and suffix
:param result: Result to return
:param unpicklable: If True, adds type info so the JSON can be deserialized back into a real object.
When False, it will be deserialized as a dictionary
"""
# we do not need to serialize an empty response from the vCenter
if result is None:
return
if isinstance(result, basestring):
return result
json = jsonpickle.encode(result, unpicklable=unpicklable)
result_for_output = str(json)
return result_for_output | 0.003096 |
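A minimal usage sketch of the unpicklable flag, assuming jsonpickle is installed; the Result class here is hypothetical:
import jsonpickle

class Result:
    def __init__(self):
        self.name = "vm-01"
        self.state = "powered_on"

# unpicklable=False -> plain JSON that loads back as a dict
print(jsonpickle.encode(Result(), unpicklable=False))
# unpicklable=True -> adds "py/object" type info so it can be restored as a Result instance
print(jsonpickle.encode(Result(), unpicklable=True))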
def np_datetime64_compat(s, *args, **kwargs):
"""
provide compat for constructing numpy datetime64's from strings:
the tz-handling changes in numpy 1.11 make '2015-01-01 09:00:00Z' emit a
deprecation warning, so '2015-01-01 09:00:00' needs to be passed instead
"""
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs) | 0.003021 |
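tz_replacer is not shown above; a minimal sketch, assuming it only needs to strip a trailing 'Z' designator so numpy >= 1.11 does not warn:
def tz_replacer(s):
    # assumption: plain strings with a trailing 'Z' (UTC designator) are the only case handled
    if isinstance(s, str) and s.endswith('Z'):
        return s[:-1]
    return s

# np_datetime64_compat('2015-01-01 09:00:00Z') would then build
# np.datetime64('2015-01-01 09:00:00') without the deprecation warning.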
def declare(queues):
"""Initialize the given queues."""
current_queues.declare(queues=queues)
click.secho(
'Queues {} have been declared.'.format(
queues or current_queues.queues.keys()),
fg='green'
) | 0.004098 |
def _load(self, data_path, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
ignore_missing: If True, serialized weights for missing layers are ignored.
'''
if data_path.endswith(".npz"):
data_dict = np.load(data_path)
keys = sorted(data_dict.keys())
for i, k in enumerate(keys):
data = data_dict[k]
op_name = "_".join(k.split("_")[:-1])
param_name = "weights" if k.split("_")[-1] == "W" else "biases"
if self.verbose:
print("Loaded: {} {}".format(op_name, param_name))
if op_name not in self.weights:
self.weights[op_name] = {}
self.weights[op_name][param_name] = data
elif data_path.endswith(".npy"):
data_dict = np.load(data_path).item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
if self.verbose:
print("Loaded: {} {}".format(op_name, param_name))
if op_name not in self.weights:
self.weights[op_name] = {}
self.weights[op_name][param_name] = data
else:
raise RuntimeError("Invalid file type.") | 0.001979 |
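A small self-contained check of the .npz key convention the loader assumes: everything before the final underscore is the op name, and the last token selects weights ('W') versus biases:
for key in ("conv1_W", "conv1_b", "fc_7_W"):
    op_name = "_".join(key.split("_")[:-1])
    param_name = "weights" if key.split("_")[-1] == "W" else "biases"
    print(key, "->", op_name, param_name)
# conv1_W -> conv1 weights
# conv1_b -> conv1 biases
# fc_7_W -> fc_7 weights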
def resume_instance(self, paused_info):
"""Restarts a paused instance, retaining disk and config.
:param dict paused_info: dict holding the instance identifier under the key "instance_id"
:raises: `InstanceError` if instance cannot be resumed.
:return: dict - information needed to restart instance.
"""
if not paused_info.get("instance_id"):
log.info("Instance to stop has no instance id.")
return
gce = self._connect()
try:
request = gce.instances().start(project=self._project_id,
instance=paused_info["instance_id"],
zone=self._zone)
operation = self._execute_request(request)
response = self._wait_until_done(operation)
self._check_response(response)
return
except HttpError as e:
log.error("Error restarting instance: `%s", e)
raise InstanceError("Error restarting instance `%s`", e) | 0.002953 |
def delete_api(name, description=None, region=None, key=None, keyid=None, profile=None):
'''
Delete all REST API Services with the given name and an optional API description
Returns {deleted: True, count: deleted_count} if apis were deleted, and
returns {deleted: False} if error or not found.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api myapi_name
salt myminion boto_apigateway.delete_api myapi_name description='api description'
'''
try:
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
r = _find_apis_by_name(name, description=description, **conn_params)
apis = r.get('restapi')
if apis:
conn = _get_conn(**conn_params)
for api in apis:
conn.delete_rest_api(restApiId=api['id'])
return {'deleted': True, 'count': len(apis)}
else:
return {'deleted': False}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} | 0.004651 |
def sor(A, x, b, omega, iterations=1, sweep='forward'):
"""Perform SOR iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
omega : scalar
Damping parameter
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
When omega=1.0, SOR is equivalent to Gauss-Seidel.
Examples
--------
>>> # Use SOR as a stand-alone solver
>>> from pyamg.relaxation.relaxation import sor
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> sor(A, x0, b, 1.33, iterations=10)
>>> print(norm(b-A*x0))
3.03888724811
>>> #
>>> # Use SOR as the multigrid smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}),
... postsmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
x_old = np.empty_like(x)
for i in range(iterations):
x_old[:] = x
gauss_seidel(A, x, b, iterations=1, sweep=sweep)
x *= omega
x_old *= (1-omega)
x += x_old | 0.000541 |
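The loop above is the weighted form x <- omega*GS(x) + (1-omega)*x_old; a self-contained sketch of the equivalent update for a dense matrix, without the pyamg helpers:
import numpy as np

def sor_sweep_dense(A, x, b, omega):
    # one forward SOR sweep, updating x in place (illustration only)
    n = len(b)
    for i in range(n):
        sigma = A[i, :i] @ x[:i] + A[i, i+1:] @ x[i+1:]
        x[i] = (1 - omega) * x[i] + omega * (b[i] - sigma) / A[i, i]

A = np.array([[4.0, -1.0], [-1.0, 4.0]])
b = np.ones(2)
x = np.zeros(2)
for _ in range(25):
    sor_sweep_dense(A, x, b, 1.33)
print(np.allclose(A @ x, b))  # True once converged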
def get_absolute_path(some_path):
"""
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will be returned unmodified; if the input is
relative, it will be resolved against the current working directory.
"""
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) | 0.002398 |
def pack_ihex(type_, address, size, data):
"""Create a Intel HEX record of given data.
"""
line = '{:02X}{:04X}{:02X}'.format(size, address, type_)
if data:
line += binascii.hexlify(data).decode('ascii').upper()
return ':{}{:02X}'.format(line, crc_ihex(line)) | 0.003436 |
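crc_ihex is not shown; a self-contained sketch, assuming it computes the standard Intel HEX checksum (two's complement of the sum of the record bytes):
import binascii

def crc_ihex_sketch(hexline):
    total = sum(binascii.unhexlify(hexline))
    return (-total) & 0xFF

# data record (type 0) of two bytes 0x12 0x34 at address 0x0100
line = '{:02X}{:04X}{:02X}'.format(2, 0x0100, 0) + binascii.hexlify(b'\x12\x34').decode('ascii').upper()
print(':{}{:02X}'.format(line, crc_ihex_sketch(line)))  # :020100001234B7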
def put_admin_metadata(self, admin_metadata):
"""Store the admin metadata."""
logger.debug("Putting admin metdata")
text = json.dumps(admin_metadata)
key = self.get_admin_metadata_key()
self.put_text(key, text) | 0.008 |
def _clear_mountpoint(self):
"""Clears a created mountpoint. Does not unmount it, merely deletes it."""
if self.mountpoint:
os.rmdir(self.mountpoint)
self.mountpoint = "" | 0.014218 |
def docker_windows_reverse_fileuri_adjust(fileuri):
# type: (Text) -> (Text)
r"""
On Docker on Windows, file URIs do not contain ':' in the path.
To convert such a file URI to a Windows-compatible one, add ':' after the
drive letter, so file:///E/var becomes file:///E:/var
"""
if fileuri is not None and onWindows():
if urllib.parse.urlsplit(fileuri).scheme == "file":
filesplit = fileuri.split("/")
if filesplit[3][-1] != ':':
filesplit[3] = filesplit[3]+':'
return '/'.join(filesplit)
return fileuri
raise ValueError("not a file URI")
return fileuri | 0.001555 |
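The string surgery itself, shown without the onWindows() guard; index 3 of the split is the drive letter in file:///E/var:
fileuri = "file:///E/var"
filesplit = fileuri.split("/")      # ['file:', '', '', 'E', 'var']
if filesplit[3][-1] != ':':
    filesplit[3] = filesplit[3] + ':'
print('/'.join(filesplit))          # file:///E:/var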
def Ctrl_C(self, delay=0):
"""Ctrl + C shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.C, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1))) | 0.006173 |
def add_error(self, txt):
"""Add a message in the configuration errors list so we can print them
all in one place
Set the object configuration as not correct
:param txt: error message
:type txt: str
:return: None
"""
self.configuration_errors.append(txt)
self.conf_is_correct = False | 0.005571 |
def export(self, name, columns, points):
"""Write the points to the CouchDB server."""
logger.debug("Export {} stats to CouchDB".format(name))
# Create DB input
data = dict(zip(columns, points))
# Set the type to the current stat name
data['type'] = name
data['time'] = couchdb.mapping.DateTimeField()._to_json(datetime.now())
# Write input to the CouchDB database
# Results can be viewed at: http://127.0.0.1:5984/_utils
try:
self.client[self.db].save(data)
except Exception as e:
logger.error("Cannot export {} stats to CouchDB ({})".format(name, e)) | 0.004539 |
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop()) | 0.001682 |
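This helper looks like the one backing the standard textwrap module; the same break_long_words behaviour is visible through the public API:
import textwrap

word = "a" * 25
print(textwrap.wrap(word, width=10))                          # ['aaaaaaaaaa', 'aaaaaaaaaa', 'aaaaa']
print(textwrap.wrap(word, width=10, break_long_words=False))  # ['aaaaaaaaaaaaaaaaaaaaaaaaa']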
def bundle_models(models):
"""Create a bundle of selected `models`. """
custom_models = _get_custom_models(models)
if custom_models is None:
return None
key = calc_cache_key(custom_models)
bundle = _bundle_cache.get(key, None)
if bundle is None:
try:
_bundle_cache[key] = bundle = _bundle_models(custom_models)
except CompilationError as error:
print("Compilation failed:", file=sys.stderr)
print(str(error), file=sys.stderr)
sys.exit(1)
return bundle | 0.001812 |
def pack_args_by_32(holder, maxlen, arg, typ, context, placeholder,
dynamic_offset_counter=None, datamem_start=None, zero_pad_i=None, pos=None):
"""
Copy necessary variables to pre-allocated memory section.
:param holder: Complete holder for all args
:param maxlen: Total length in bytes of the full arg section (static + dynamic).
:param arg: Current arg to pack
:param context: Context of arg
:param placeholder: Static placeholder for static argument part.
:param dynamic_offset_counter: position counter stored in static args.
:param dynamic_placeholder: pointer to current position in memory to write dynamic values to.
:param datamem_start: position where the whole datamem section starts.
"""
if isinstance(typ, BaseType):
if isinstance(arg, LLLnode):
value = unwrap_location(arg)
else:
value = parse_expr(arg, context)
value = base_type_conversion(value, value.typ, typ, pos)
holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))
elif isinstance(typ, ByteArrayLike):
if isinstance(arg, LLLnode): # Is a preallocated variable.
source_lll = arg
else:
source_lll = parse_expr(arg, context)
# Set static offset, in arg slot.
holder.append(LLLnode.from_list(['mstore', placeholder, ['mload', dynamic_offset_counter]]))
# Get the beginning of where to write the ByteArray.
dest_placeholder = LLLnode.from_list(
['add', datamem_start, ['mload', dynamic_offset_counter]],
typ=typ, location='memory', annotation="pack_args_by_32:dest_placeholder")
copier = make_byte_array_copier(dest_placeholder, source_lll, pos=pos)
holder.append(copier)
# Add zero padding.
new_maxlen = ceil32(source_lll.typ.maxlen)
holder.append([
'with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]], [
'seq', ['with', '_bytearray_loc', dest_placeholder, [
'seq', ['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen, [
'seq',
# stay within allocated bounds
['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'],
[
'mstore8',
['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]],
0,
],
]],
]],
]
])
# Increment offset counter.
increment_counter = LLLnode.from_list([
'mstore', dynamic_offset_counter,
[
'add',
['add', ['mload', dynamic_offset_counter], ['ceil32', ['mload', dest_placeholder]]],
32,
],
], annotation='Increment dynamic offset counter')
holder.append(increment_counter)
elif isinstance(typ, ListType):
maxlen += (typ.count - 1) * 32
typ = typ.subtype
def check_list_type_match(provided): # Check list types match.
if provided != typ:
raise TypeMismatchException(
"Log list type '%s' does not match provided, expected '%s'" % (provided, typ)
)
# List from storage
if isinstance(arg, ast.Attribute) and arg.value.id == 'self':
stor_list = context.globals[arg.attr]
check_list_type_match(stor_list.typ.subtype)
size = stor_list.typ.count
mem_offset = 0
for i in range(0, size):
storage_offset = i
arg2 = LLLnode.from_list(
['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],
typ=typ,
)
holder, maxlen = pack_args_by_32(
holder,
maxlen,
arg2,
typ,
context,
placeholder + mem_offset,
pos=pos,
)
mem_offset += get_size_of_type(typ) * 32
# List from variable.
elif isinstance(arg, ast.Name):
size = context.vars[arg.id].size
pos = context.vars[arg.id].pos
check_list_type_match(context.vars[arg.id].typ.subtype)
mem_offset = 0
for _ in range(0, size):
arg2 = LLLnode.from_list(pos + mem_offset, typ=typ, location='memory')
holder, maxlen = pack_args_by_32(
holder,
maxlen,
arg2,
typ,
context,
placeholder + mem_offset,
pos=pos,
)
mem_offset += get_size_of_type(typ) * 32
# List from list literal.
else:
mem_offset = 0
for arg2 in arg.elts:
holder, maxlen = pack_args_by_32(
holder,
maxlen,
arg2,
typ,
context,
placeholder + mem_offset,
pos=pos,
)
mem_offset += get_size_of_type(typ) * 32
return holder, maxlen | 0.002572 |
def __vDecodeDIGICAMControl(self, mCommand_Long):
'''Session'''
if mCommand_Long.param1 != 0:
print ("Session = %d" % mCommand_Long.param1)
'''Zooming Step Value'''
if mCommand_Long.param2 != 0:
print ("Zooming Step = %d" % mCommand_Long.param2)
'''Zooming Step Value'''
if mCommand_Long.param3 != 0:
print ("Zooming Value = %d" % mCommand_Long.param3)
if (mCommand_Long.param3 == 1):
self.__vCmdCamZoomIn()
elif (mCommand_Long.param3 == -1):
self.__vCmdCamZoomOut()
else:
print ("Invalid Zoom Value")
'''Focus 0=Unlock/1=Lock/2=relock'''
if mCommand_Long.param4 != 0:
print ("Focus = %d" % mCommand_Long.param4)
'''Trigger'''
if mCommand_Long.param5 != 0:
print ("Trigger = %d" % mCommand_Long.param5)
self.__vCmdCamTrigger(mCommand_Long) | 0.011858 |
def set_server(self, wsgi_app, fnc_serve=None):
"""
figures out how the wsgi application is to be served
according to config
"""
self.set_wsgi_app(wsgi_app)
ssl_config = self.get_config("ssl")
ssl_context = {}
if self.get_config("server") == "gevent":
if ssl_config.get("enabled"):
ssl_context["certfile"] = ssl_config.get("cert")
ssl_context["keyfile"] = ssl_config.get("key")
from gevent.pywsgi import WSGIServer
http_server = WSGIServer(
(self.host, self.port),
wsgi_app,
**ssl_context
)
self.log.debug("Serving WSGI via gevent.pywsgi.WSGIServer")
fnc_serve = http_server.serve_forever
elif self.get_config("server") == "uwsgi":
self.pluginmgr_config["start_manual"] = True
elif self.get_config("server") == "gunicorn":
self.pluginmgr_config["start_manual"] = True
elif self.get_config("server") == "self":
fnc_serve = self.run
# figure out async handler
if self.get_config("async") == "gevent":
# handle async via gevent
import gevent
self.log.debug("Handling wsgi on gevent")
self.worker = gevent.spawn(fnc_serve)
elif self.get_config("async") == "thread":
self.worker = fnc_serve
else:
self.worker = fnc_serve | 0.00132 |
def _needs_git(func):
"""
Small decorator to make sure we have the git repo, or report error
otherwise.
"""
@wraps(func)
def myfunc(*args, **kwargs):
if not WITH_GIT:
raise RuntimeError(
"Dulwich library not available, can't extract info from the "
"git repos."
)
return func(*args, **kwargs)
return myfunc | 0.002451 |
def publish_pushdb_changes_to_remote_scm(self, pushdb_file, coordinate, tag_name, tag_message,
postscript=None):
"""Push pushdb changes to the remote scm repository, and then tag the commit if it succeeds."""
self._add_pushdb(pushdb_file)
self.commit_pushdb(coordinate, postscript=postscript)
self._push_and_tag_changes(
tag_name=tag_name,
tag_message='{message}{postscript}'.format(message=tag_message, postscript=postscript or '')
) | 0.009804 |
def get_Generic_parameters(tp, generic_supertype):
"""tp must be a subclass of generic_supertype.
Retrieves the type values from tp that correspond to parameters
defined by generic_supertype.
E.g. get_Generic_parameters(tp, typing.Mapping) is equivalent
to get_Mapping_key_value(tp) except for the error message.
Note that get_Generic_itemtype(tp) is not exactly equal to
get_Generic_parameters(tp, typing.Container), as that method
additionally contains treatment for typing.Tuple and typing.Iterable.
"""
try:
res = _select_Generic_superclass_parameters(tp, generic_supertype)
except TypeError:
res = None
if res is None:
raise TypeError("%s has no proper parameters defined by %s."%
(type_str(tp), type_str(generic_supertype)))
else:
return tuple(res) | 0.003505 |
def send_file(self, recipient_id, file_path, notification_type=NotificationType.regular):
"""Send file to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
Input:
recipient_id: recipient id to send to
file_path: path to file to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment(recipient_id, "file", file_path, notification_type) | 0.007921 |
def list(self, **kwds):
"""
Endpoint: /albums/list.json
Returns a list of Album objects.
"""
albums = self._client.get("/albums/list.json", **kwds)["result"]
albums = self._result_to_list(albums)
return [Album(self._client, album) for album in albums] | 0.006494 |
def get_cpu_usage(self, cpu_id=0):
"""
Shows cpu usage in seconds, "cpu_id" is ignored.
:returns: cpu usage in seconds
"""
cpu_usage = yield from self._hypervisor.send('vm cpu_usage "{name}" {cpu_id}'.format(name=self._name, cpu_id=cpu_id))
return int(cpu_usage[0]) | 0.009524 |
def assert_match(actual_char_or_str, expected_char_or_str):
"""If values don't match, print them and raise a ValueError, otherwise,
continue
Raises: ValueError if arguments do not match"""
if expected_char_or_str != actual_char_or_str:
print("Expected")
pprint(expected_char_or_str)
print("")
print("Got")
pprint(actual_char_or_str)
raise ValueError() | 0.00241 |
def get_input_geo(geo):
"""Similar to :meth:`get_input_peer`, but for geo points"""
try:
if geo.SUBCLASS_OF_ID == 0x430d225: # crc32(b'InputGeoPoint'):
return geo
except AttributeError:
_raise_cast_fail(geo, 'InputGeoPoint')
if isinstance(geo, types.GeoPoint):
return types.InputGeoPoint(lat=geo.lat, long=geo.long)
if isinstance(geo, types.GeoPointEmpty):
return types.InputGeoPointEmpty()
if isinstance(geo, types.MessageMediaGeo):
return get_input_geo(geo.geo)
if isinstance(geo, types.Message):
return get_input_geo(geo.media)
_raise_cast_fail(geo, 'InputGeoPoint') | 0.001499 |
def deferFunction(self, callable):
"""
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
"""
self._deferredFunctions.append((callable, self.scopeStack[:], self.offset)) | 0.005859 |
def init_environment():
"""Allow variables assigned in .env available using
os.environ.get('VAR_NAME')"""
base_path = os.path.abspath(os.path.dirname(__file__))
env_path = '{0}/.env'.format(base_path)
if os.path.exists(env_path):
with open(env_path) as f:
lines = f.readlines()
for line in lines:
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1] | 0.003824 |
async def dump_blob(writer, elem, elem_type, params=None):
"""
Dumps blob message to the writer.
Supports both blob and raw value.
:param writer:
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_is_blob = isinstance(elem, BlobType)
elem_params = elem if elem_is_blob or elem_type is None else elem_type
data = bytes(getattr(elem, BlobType.DATA_ATTR) if elem_is_blob else elem)
if not elem_params.FIX_SIZE:
await dump_uvarint(writer, len(elem))
elif len(data) != elem_params.SIZE:
raise ValueError('Fixed size blob has not defined size: %s' % elem_params.SIZE)
await writer.awrite(data) | 0.00295 |
def add_file(self, filename, file_content):
"""Add a file for Document and Report types.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_file('my_file.txt', 'my contents')
Args:
filename (str): The name of the file.
file_content (bytes|method|str): The contents of the file or callback to get contents.
"""
self._group_data['fileName'] = filename
self._file_content = file_content | 0.005894 |
def print_timers(self):
''' PRINT EXECUTION TIMES FOR THE LIST OF PROGRAMS '''
self.timer += time()
total_time = self.timer
tmp = '* %s *'
debug.log(
'',
'* '*29,
tmp%(' '*51),
tmp%('%s %s %s'%('Program Name'.ljust(20), 'Status'.ljust(7), 'Execute Time (H:M:S)')),
tmp%('='*51)
)
for name in self.list:
if self.exists(name):
timer = getattr(self, name).get_time()
status = getattr(self, name).get_status()
self.timer -= timer
debug.log(tmp%(self.return_timer(name, status, timer)))
else:
debug.log(tmp%("%s %s -- : -- : --"%(name[:20].ljust(20),' '*8)))
debug.log(
tmp%(self.return_timer('Wrapper', '', self.timer)),
tmp%('='*51),
tmp%(self.return_timer('Total', '', total_time)),
tmp%(' '*51),
'* '*29,
''
) | 0.025397 |
def _underscore_to_camelcase(value):
"""
Convert Python snake case back to mixed case.
"""
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(next(c)(x) if x else '_' for x in value.split("_")) | 0.006098 |
def Vfgs(self, T=None, P=None):
r'''Volume fractions of all species in a hypothetical pure-gas phase
at the current or specified temperature and pressure. If temperature
or pressure are specified, the non-specified property is assumed to be
that of the mixture. Note this is a method, not a property. Volume
fractions are calculated based on **pure species volumes only**.
Examples
--------
>>> Mixture(['sulfur hexafluoride', 'methane'], zs=[.2, .9], T=315).Vfgs()
[0.18062059238682632, 0.8193794076131737]
>>> S = Mixture(['sulfur hexafluoride', 'methane'], zs=[.1, .9])
>>> S.Vfgs(P=1E2)
[0.0999987466608421, 0.9000012533391578]
'''
if (T is None or T == self.T) and (P is None or P == self.P):
Vmgs = self.Vmgs
else:
if T is None: T = self.T
if P is None: P = self.P
Vmgs = [i(T, P) for i in self.VolumeGases]
if none_and_length_check([Vmgs]):
return zs_to_Vfs(self.zs, Vmgs)
return None | 0.009083 |
def toString(self):
"""
Converts the data about this view widget into a string value.
:return <str>
"""
xprofile = self.toXml()
projex.text.xmlindent(xprofile)
return ElementTree.tostring(xprofile) | 0.011194 |
def transformer_moe_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 2001
hparams.max_input_seq_length = 2000
hparams.max_target_seq_length = 2000
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 5
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = True
# According to noam, ("n", "da") seems better for harder-to-learn models
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# Hparams used by transformer_prepare_decoder() function
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams = common_attention.add_standard_attention_hparams(hparams)
# Decoder layers type. If set, num_decoder_layers parameter will be ignored
# and the number of decoder layer will be deduced from the string
# See top file comment for example of usage
hparams.add_hparam("layer_types", "")
# Default attention type (ex: a, loc, red,...) and feed-forward type (ex: fc,
# sep, moe,...)
hparams.add_hparam("default_att", "a")
hparams.add_hparam("default_ff", "fc")
return hparams | 0.023375 |
def use_in(ContentHandler):
"""
Modify ContentHandler, a sub-class of
pycbc_glue.ligolw.LIGOLWContentHandler, to cause it to use the Table,
Column, and Stream classes defined in this module when parsing XML
documents.
Example:
>>> from pycbc_glue.ligolw import ligolw
>>> class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
... pass
...
>>> use_in(LIGOLWContentHandler)
<class 'pycbc_glue.ligolw.table.LIGOLWContentHandler'>
"""
def startColumn(self, parent, attrs):
return Column(attrs)
def startStream(self, parent, attrs, __orig_startStream = ContentHandler.startStream):
if parent.tagName == ligolw.Table.tagName:
parent._end_of_columns()
return TableStream(attrs).config(parent)
return __orig_startStream(self, parent, attrs)
def startTable(self, parent, attrs):
return Table(attrs)
ContentHandler.startColumn = startColumn
ContentHandler.startStream = startStream
ContentHandler.startTable = startTable
return ContentHandler | 0.030769 |
def __do_query_level(self):
"""Helper to perform the actual query the current dimmer level of the
output. For pure on/off loads the result is either 0.0 or 100.0."""
self._lutron.send(Lutron.OP_QUERY, Output._CMD_TYPE, self._integration_id,
Output._ACTION_ZONE_LEVEL) | 0.006873 |
def show_args():
'''
Show which arguments map to which flags and options.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' logadm.show_args
'''
mapping = {'flags': {}, 'options': {}}
for flag, arg in option_toggles.items():
mapping['flags'][flag] = arg
for option, arg in option_flags.items():
mapping['options'][option] = arg
return mapping | 0.00232 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
C = self.COEFFS[imt]
mean = self._compute_mean(C, rup.mag, dists.rjb)
stddevs = self._compute_stddevs(C, rup.mag, dists.rjb, imt,
stddev_types)
# apply decay factor for 3 and 4 seconds (not originally supported
# by the equations)
if imt.period == 3.0:
mean /= 0.612
if imt.period == 4.0:
mean /= 0.559
return mean, stddevs | 0.002427 |
def begin_subsegment(self, name, namespace='local'):
"""
Begin a new subsegment.
If there is open subsegment, the newly created subsegment will be the
child of latest opened subsegment.
If not, it will be the child of the current open segment.
:param str name: the name of the subsegment.
:param str namespace: currently can only be 'local', 'remote', 'aws'.
"""
segment = self.current_segment()
if not segment:
log.warning("No segment found, cannot begin subsegment %s." % name)
return None
if not segment.sampled:
subsegment = DummySubsegment(segment, name)
else:
subsegment = Subsegment(name, namespace, segment)
self.context.put_subsegment(subsegment)
return subsegment | 0.002387 |
def analisar(retorno):
"""Constrói uma :class:`RespostaExtrairLogs` a partir do retorno
informado.
:param unicode retorno: Retorno da função ``ExtrairLogs``.
"""
resposta = analisar_retorno(forcar_unicode(retorno),
funcao='ExtrairLogs',
classe_resposta=RespostaExtrairLogs,
campos=RespostaSAT.CAMPOS + (
('arquivoLog', unicode),
),
campos_alternativos=[
# if the log extraction fails, the standard set of
# fields is expected in the return...
RespostaSAT.CAMPOS,
]
)
if resposta.EEEEE not in ('15000',):
raise ExcecaoRespostaSAT(resposta)
return resposta | 0.006075 |
def setup(app):
"""Setup sphinx-gallery sphinx extension"""
sphinx_compatibility._app = app
app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html')
for key in ['plot_gallery', 'abort_on_example_error']:
app.add_config_value(key, get_default_config_value(key), 'html')
try:
app.add_css_file('gallery.css')
except AttributeError: # Sphinx < 1.8
app.add_stylesheet('gallery.css')
# Sphinx < 1.6 calls it `_extensions`, >= 1.6 is `extensions`.
extensions_attr = '_extensions' if hasattr(
app, '_extensions') else 'extensions'
if 'sphinx.ext.autodoc' in getattr(app, extensions_attr):
app.connect('autodoc-process-docstring', touch_empty_backreferences)
app.connect('builder-inited', generate_gallery_rst)
app.connect('build-finished', copy_binder_files)
app.connect('build-finished', summarize_failing_examples)
app.connect('build-finished', embed_code_links)
metadata = {'parallel_read_safe': True,
'parallel_write_safe': False,
'version': _sg_version}
return metadata | 0.000893 |
def search_continuous_sets(self, dataset_id):
"""
Returns an iterator over the ContinuousSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.ContinuousSet`
objects defined by the query parameters.
"""
request = protocol.SearchContinuousSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "continuoussets", protocol.SearchContinuousSetsResponse) | 0.002886 |
def array_map(ol,map_func,*args):
'''
obsoleted, kept only for compatibility
from elist.elist import *
ol = [1,2,3,4]
def map_func(ele,mul,plus):
return(ele*mul+plus)
array_map(ol,map_func,2,100)
'''
rslt = list(map(lambda ele:map_func(ele,*args),ol))
return(rslt) | 0.018519 |
def step(step_name=None):
"""
Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
"""
def decorator(func):
if step_name:
name = step_name
else:
name = func.__name__
add_step(name, func)
return func
return decorator | 0.001558 |
def get_nodes(self, request):
"""
Return menu's node for categories
"""
nodes = []
nodes.append(NavigationNode(_('Categories'),
reverse('zinnia:category_list'),
'categories'))
for category in Category.objects.all():
nodes.append(NavigationNode(category.title,
category.get_absolute_url(),
category.pk, 'categories'))
return nodes | 0.003643 |
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from OpenSSL import SSL
cert, pkey = generate_adhoc_ssl_pair()
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx | 0.003472 |
def daylight_utc(self, date, latitude, longitude, observer_elevation=0):
"""Calculate daylight start and end times in the UTC timezone.
:param date: Date to calculate for.
:type date: :class:`datetime.date`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:param observer_elevation: Elevation in metres to calculate daylight for
:type observer_elevation: int
:return: A tuple of the UTC date and time at which daylight starts and ends.
:rtype: (:class:`~datetime.datetime`, :class:`~datetime.datetime`)
"""
start = self.sunrise_utc(date, latitude, longitude, observer_elevation)
end = self.sunset_utc(date, latitude, longitude, observer_elevation)
return start, end | 0.004242 |
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
message = str(http_error)
if http_error.respbody is not None:
message += '\n' + http_error.respbody.decode('utf-8-sig')
raise AzureHttpError(message, http_error.status) | 0.00369 |
def avg_n_nplusone(x):
""" returns x[0]/2, (x[0]+x[1])/2, ... (x[-2]+x[-1])/2, x[-1]/2
"""
y = np.zeros(1 + x.shape[0])
hx = 0.5 * x
y[:-1] = hx
y[1:] += hx
return y | 0.020305 |
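A worked example of the neighbour averaging above, replicated inline:
import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.zeros(1 + x.shape[0])
hx = 0.5 * x
y[:-1] = hx
y[1:] += hx
print(y)  # [0.5 1.5 2.5 1.5]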
def new(self, filename, encoding, text, default_content=False,
empty=False):
"""
Create new filename with *encoding* and *text*
"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
if not empty:
finfo.editor.insert_text(os.linesep)
if default_content:
finfo.default = True
finfo.editor.document().setModified(False)
return finfo | 0.005272 |
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.size > chunk_size | 0.00716 |
def get_attrs(model_field, disabled=False):
"""Set attributes on the display widget."""
attrs = {}
attrs['class'] = 'span6 xlarge'
if disabled or isinstance(model_field, ObjectIdField):
attrs['class'] += ' disabled'
attrs['readonly'] = 'readonly'
return attrs | 0.00339 |
def selected_item(self):
"""
:obj:`consolemenu.items.MenuItem`: The item in :attr:`items` that the user most recently selected, or None.
"""
if self.items and self.selected_option != -1:
return self.items[self.current_option]
else:
return None | 0.00974 |
def load_caffe(model, defPath, modelPath, match_all=True, bigdl_type="float"):
"""
Load a pre-trained Caffe model.
:param model: A BigDL model definition which is equivalent to the pre-trained caffe model.
:param defPath: The path containing the caffe model definition.
:param modelPath: The path containing the pre-trained caffe model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadCaffe", model, defPath, modelPath, match_all)
return Layer.of(jmodel) | 0.009058 |
def get_help_topics(self) -> List[str]:
""" Returns a list of help topics """
return [name[len(HELP_FUNC_PREFIX):] for name in self.get_names()
if name.startswith(HELP_FUNC_PREFIX) and callable(getattr(self, name))] | 0.012146 |
def command(cmd, shell=True, echo=True, suffix=None):
"""SSH: Run the given command over SSH as defined in environment"""
if env():
cij.err("cij.ssh.command: Invalid SSH environment")
return 1
prefix = []
if cij.ENV.get("SSH_CMD_TIME") == "1":
prefix.append("/usr/bin/time")
if cij.ENV.get("SSH_CMD_TIMEOUT"):
prefix.append("timeout")
prefix.append(cij.ENV.get("SSH_CMD_TIMEOUT"))
prefix.append("ssh")
args = []
if cij.ENV.get("SSH_KEY"):
args.append("-i")
args.append(cij.ENV.get("SSH_KEY"))
if cij.ENV.get("SSH_PORT"):
args.append("-p")
args.append(cij.ENV.get("SSH_PORT"))
args.append("@".join([cij.ENV.get("SSH_USER"), cij.ENV.get("SSH_HOST")]))
wrapped = prefix + args + ["'%s'" % " ".join(cmd)]
if suffix:
wrapped += suffix
return cij.util.execute(wrapped, shell, echo) | 0.001089 |
def url_unsplit (parts):
"""Rejoin URL parts to a string."""
if parts[2] == default_ports.get(parts[0]):
return "%s://%s%s" % (parts[0], parts[1], parts[3])
return "%s://%s:%d%s" % parts | 0.009709 |
def _normalize_field_name(value):
"""Normalizing value string used as key and field name.
:param value: String to normalize
:type value: str
:return: normalized string
:rtype: str
"""
# Replace non word/letter character
return_value = re.sub(r'[^\w\s-]+', '', value)
# Replaces whitespace with underscores
return_value = re.sub(r'\s+', '_', return_value)
return return_value.lower() | 0.002336 |
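A worked example of the normalization (punctuation stripped, whitespace collapsed to underscores, lowercased):
import re

value = "First  Name!"
value = re.sub(r'[^\w\s-]+', '', value)  # 'First  Name'
value = re.sub(r'\s+', '_', value)       # 'First_Name'
print(value.lower())                     # first_name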
def delete(self, id=None):
"""
Delete a record from the database
:param id: The id of the row to delete
:type id: mixed
:return: The number of rows deleted
:rtype: int
"""
if id is not None:
self.where('id', '=', id)
sql = self._grammar.compile_delete(self)
return self._connection.delete(sql, self.get_bindings()) | 0.004878 |
def to_python(self, value: Optional[str]) -> Optional[Any]:
"""
Called during deserialization and during form ``clean()`` calls.
Must deal with an instance of the correct type; a string; or ``None``
(if the field allows ``null=True``).
Should raise ``ValidationError`` if problems.
"""
# https://docs.djangoproject.com/en/1.8/howto/custom-model-fields/
# log.debug("to_python: {}, {}", value, type(value))
if isinstance(value, datetime.datetime):
return value
if value is None:
return value
if value == '':
return None
return iso_string_to_python_datetime(value) | 0.002882 |
def measureSize(self, diff, chunkSize):
""" Spend some time to get an accurate size. """
self._fileSystemSync()
sendContext = self.butter.send(
self.getSendPath(diff.toVol),
self.getSendPath(diff.fromVol),
diff,
showProgress=self.showProgress is not False,
allowDryRun=False,
)
class _Measure(io.RawIOBase):
def __init__(self, estimatedSize, showProgress):
self.totalSize = None
self.progress = progress.DisplayProgress(estimatedSize) if showProgress else None
def __enter__(self):
self.totalSize = 0
if self.progress:
self.progress.__enter__()
return self
def __exit__(self, exceptionType, exceptionValue, traceback):
if self.progress:
self.progress.__exit__(exceptionType, exceptionValue, traceback)
return False # Don't suppress exception
def writable(self):
return True
def write(self, bytes):
self.totalSize += len(bytes)
if self.progress:
self.progress.update(self.totalSize)
logger.info("Measuring %s", diff)
measure = _Measure(diff.size, self.showProgress is not False)
Store.transfer(sendContext, measure, chunkSize)
diff.setSize(measure.totalSize, False)
for path in self.getPaths(diff.toVol):
path = self._fullPath(path) + Store.theInfoExtension
with open(path, "a") as infoFile:
diff.toVol.writeInfoLine(infoFile, diff.fromUUID, measure.totalSize) | 0.002889 |
def exit_with_error(msg):
'''
:param msg: string message to print before exiting
Print the error message, as well as a blurb on where to find the
job workspaces
'''
msg += '\n'
msg += 'Local job workspaces can be found in: ' + str(environ.get('DX_TEST_JOB_HOMEDIRS'))
sys.exit(msg) | 0.006369 |
def absent(name):
'''
Ensures that the named bridge does not exist, eventually deletes it.
Args:
name: The name of the bridge.
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
# Comment and change messages
comment_bridge_deleted = 'Bridge {0} deleted.'.format(name)
comment_bridge_notdeleted = 'Unable to delete bridge: {0}.'.format(name)
comment_bridge_notexists = 'Bridge {0} does not exist.'.format(name)
changes_bridge_deleted = {name: {'old': 'Bridge {0} exists.'.format(name),
'new': 'Bridge {0} deleted.'.format(name),
}
}
bridge_exists = __salt__['openvswitch.bridge_exists'](name)
# Dry run, test=true mode
if __opts__['test']:
if not bridge_exists:
ret['result'] = True
ret['comment'] = comment_bridge_notexists
else:
ret['result'] = None
ret['comment'] = comment_bridge_deleted
return ret
if not bridge_exists:
ret['result'] = True
ret['comment'] = comment_bridge_notexists
else:
bridge_delete = __salt__['openvswitch.bridge_delete'](name)
if bridge_delete:
ret['result'] = True
ret['comment'] = comment_bridge_deleted
ret['changes'] = changes_bridge_deleted
else:
ret['result'] = False
ret['comment'] = comment_bridge_notdeleted
return ret | 0.000653 |
def get_by_details(self, name, type, clazz):
"""Gets an entry by details. Will return None if there is
no matching entry."""
entry = DNSEntry(name, type, clazz)
return self.get(entry) | 0.009259 |
def extend(self, iterable):
""" Extends the `Sequence` by appending items from the *iterable*.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the `Sequence`.
"""
# Sequence
if is_sequence(iterable):
self._data.extend(iterable)
# Structure
elif is_structure(iterable):
members = [item for item in iterable.values()]
self._data.extend(members)
# Field
elif is_field(iterable):
self._data.extend([iterable])
# Iterable
elif isinstance(iterable, (set, tuple, list)):
self._data.extend(Sequence(iterable))
else:
raise MemberTypeError(self, iterable, member=len(self)) | 0.005258 |
def respond(self):
"""Process the current request.
From :pep:`333`:
The start_response callable must not actually transmit
the response headers. Instead, it must store them for the
server or gateway to transmit only after the first
iteration of the application return value that yields
a NON-EMPTY string, or upon the application's first
invocation of the write() callable.
"""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in filter(None, response):
if not isinstance(chunk, six.binary_type):
raise ValueError('WSGI Applications must yield bytes')
self.write(chunk)
finally:
# Send headers if not already sent
self.req.ensure_headers_sent()
if hasattr(response, 'close'):
response.close() | 0.002077 |
def fetch_docker_tags(self):
"""
Export all dockerhub tags associated with each component given by
the -t flag.
"""
# dict to store the already parsed components (useful when forks are
# given to the pipeline string via -t flag
dict_of_parsed = {}
# fetches terminal width and subtracts 3 because we always add a
# new line character and we want a space at the beginning and at the end
# of each line
terminal_width = shutil.get_terminal_size().columns - 3
# first header
center_string = " Selected container tags "
# starts a list with the headers
tags_list = [
[
"=" * int(terminal_width / 4),
"{0}{1}{0}".format(
"=" * int(((terminal_width/2 - len(center_string)) / 2)),
center_string)
,
"{}\n".format("=" * int(terminal_width / 4))
],
["component", "container", "tags"],
[
"=" * int(terminal_width / 4),
"=" * int(terminal_width / 2),
"=" * int(terminal_width / 4)
]
]
# Skip first init process and iterate through the others
for p in self.processes[1:]:
template = p.template
# if component has already been printed then skip and don't print
# again
if template in dict_of_parsed:
continue
# starts a list of containers for the current process in
# dict_of_parsed, in which each container will be added to this
# list once it gets parsed
dict_of_parsed[template] = {
"container": []
}
# fetch repo name from directives of each component.
for directives in p.directives.values():
try:
repo = directives["container"]
default_version = directives["version"]
except KeyError:
# adds the default container if container key isn't present
# this happens for instance in integrity_coverage
repo = "flowcraft/flowcraft_base"
default_version = "1.0.0-1"
# checks if repo_version already exists in list of the
# containers for the current component being queried
repo_version = repo + default_version
if repo_version not in dict_of_parsed[template]["container"]:
# make the request to docker hub
r = requests.get(
"https://hub.docker.com/v2/repositories/{}/tags/"
.format(repo)
)
# checks the status code of the request, if it is 200 then
# parses docker hub entry, otherwise retrieve no tags but
# alerts the user
if r.status_code != 404:
# parse response content to dict and fetch results key
r_content = json.loads(r.content)["results"]
for version in r_content:
printed_version = (version["name"] + "*") \
if version["name"] == default_version \
else version["name"]
tags_list.append([template, repo, printed_version])
else:
tags_list.append([template, repo, "No DockerHub tags"])
dict_of_parsed[template]["container"].append(repo_version)
# iterate through each entry in tags_list and print the list of tags
# for each component. Each entry (excluding the headers) contains
# 3 elements (component name, container and tag version)
for x, entry in enumerate(tags_list):
# adds different color to the header in the first list and
# if the row index is odd use one color and if it is even use another (different
# background)
color = "blue_bold" if x < 3 else \
("white" if x % 2 != 0 else "0;37;40m")
# generates a small list with the terminal width for each column,
# this will be given to string formatting as the 3, 4 and 5 element
final_width = [
int(terminal_width/4),
int(terminal_width/2),
int(terminal_width/4)
]
# writes the string to the stdout
sys.stdout.write(
colored_print("\n {0: <{3}} {1: ^{4}} {2: >{5}}".format(
*entry, *final_width), color)
)
# assures that the entire line gets the same color
sys.stdout.write("\n{0: >{1}}\n".format("(* = default)",
terminal_width + 3)) | 0.001002 |
def set_sflow(self, name, value=None, default=False, disable=False):
"""Configures the sFlow state on the interface
Args:
name (string): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
value (boolean): True if sFlow should be enabled otherwise False
default (boolean): Specifies the default value for sFlow
disable (boolean): Specifies to disable sFlow
Returns:
True if the operation succeeds otherwise False is returned
"""
if value not in [True, False, None]:
raise ValueError
commands = ['interface %s' % name]
commands.append(self.command_builder('sflow enable', value=value,
default=default, disable=disable))
return self.configure(commands) | 0.002265 |
async def stop(self):
"""
Irreversibly stop the sender.
"""
if self.__started:
self.__transport._unregister_rtp_sender(self)
self.__rtp_task.cancel()
self.__rtcp_task.cancel()
await asyncio.gather(
self.__rtp_exited.wait(),
self.__rtcp_exited.wait()) | 0.005525 |
def read_questions(self):
"""Reads questions section of packet"""
format = '!HH'
length = struct.calcsize(format)
for i in range(0, self.num_questions):
name = self.read_name()
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
question = DNSQuestion(name, info[0], info[1])
self.questions.append(question) | 0.006466 |
def gen_txn_path(self, txn):
"""Return path to state as 'str' type or None"""
txn_type = get_type(txn)
if txn_type not in self.state_update_handlers:
logger.error('Cannot generate id for txn of type {}'.format(txn_type))
return None
if txn_type == NYM:
nym = get_payload_data(txn).get(TARGET_NYM)
binary_digest = domain.make_state_path_for_nym(nym)
return hexlify(binary_digest).decode()
elif txn_type == ATTRIB:
path = domain.prepare_attr_for_state(txn, path_only=True)
return path.decode()
elif txn_type == SCHEMA:
path = domain.prepare_schema_for_state(txn, path_only=True)
return path.decode()
elif txn_type == CLAIM_DEF:
path = domain.prepare_claim_def_for_state(txn, path_only=True)
return path.decode()
elif txn_type == REVOC_REG_DEF:
path = domain.prepare_revoc_def_for_state(txn, path_only=True)
return path.decode()
elif txn_type == REVOC_REG_ENTRY:
path = domain.prepare_revoc_reg_entry_for_state(txn, path_only=True)
return path.decode()
raise NotImplementedError("path construction is not implemented for type {}".format(txn_type)) | 0.003826 |
def canvas_points(self):
"""Calculates the coordinates that the data should use to paint itself
to its associated :py:class:`.AxisChart`. This is used internally to
create the chart.
:rtype: ``tuple``"""
if self.chart():
x_axis_min = self.chart().x_lower_limit()
y_axis_min = self.chart().y_lower_limit()
x_axis_max = self.chart().x_upper_limit()
y_axis_max = self.chart().y_upper_limit()
chart_width = self.chart().width()
chart_height = self.chart().height()
horizontal_padding = self.chart().horizontal_padding()
vertical_padding = self.chart().vertical_padding()
horizontal_margin_pixels = horizontal_padding * chart_width
vertical_margin_pixels = vertical_padding * chart_height
x_axis_pixels = chart_width - (2 * horizontal_margin_pixels)
y_axis_pixels = chart_height - (2 * vertical_margin_pixels)
x_axis_span = x_axis_max - x_axis_min
y_axis_span = y_axis_max - y_axis_min
x_pixels_per_point = x_axis_pixels / x_axis_span
y_pixels_per_point = y_axis_pixels / y_axis_span
data = []
for x, y in self.data():
relative_x, relative_y = x - x_axis_min, y - y_axis_min
data.append((
(relative_x * x_pixels_per_point) + horizontal_margin_pixels,
chart_height - ((relative_y * y_pixels_per_point) + vertical_margin_pixels)
))
return tuple(data) | 0.001881 |
def random_letters(count):
"""Get a series of pseudo-random letters with no repeats."""
rv = random.choice(string.ascii_uppercase)
while len(rv) < count:
l = random.choice(string.ascii_uppercase)
if not l in rv:
rv += l
return rv | 0.010989 |
def get_log_format_default(self):
"""Returns default log message format.
.. note:: Some params may be missing.
"""
vars = self.logging.vars
format_default = (
'[pid: %s|app: %s|req: %s/%s] %s (%s) {%s vars in %s bytes} [%s] %s %s => '
'generated %s bytes in %s %s%s(%s %s) %s headers in %s bytes (%s switches on core %s)' % (
vars.WORKER_PID,
'-', # app id
'-', # app req count
'-', # worker req count
vars.REQ_REMOTE_ADDR,
vars.REQ_REMOTE_USER,
vars.REQ_COUNT_VARS_CGI,
vars.SIZE_PACKET_UWSGI,
vars.REQ_START_CTIME,
vars.REQ_METHOD,
vars.REQ_URI,
vars.RESP_SIZE_BODY,
vars.RESP_TIME_MS, # or RESP_TIME_US,
'-', # tsize
'-', # via sendfile/route/offload
vars.REQ_SERVER_PROTOCOL,
vars.RESP_STATUS,
vars.RESP_COUNT_HEADERS,
vars.RESP_SIZE_HEADERS,
vars.ASYNC_SWITCHES,
vars.CORE,
))
return format_default | 0.004065 |
def timedelta(self, start, end, start_key=min, end_key=max):
"""compute the difference between two sets of timestamps
The default behavior is to use the earliest of the first
and the latest of the second list, but this can be changed
by passing a different
Parameters
----------
start : one or more datetime objects (e.g. ar.submitted)
end : one or more datetime objects (e.g. ar.received)
start_key : callable
Function to call on `start` to extract the relevant
entry [default: min]
end_key : callable
Function to call on `end` to extract the relevant
entry [default: max]
Returns
-------
dt : float
The time elapsed (in seconds) between the two selected timestamps.
"""
if not isinstance(start, datetime):
# handle single_result AsyncResults, where ar.stamp is single object,
# not a list
start = start_key(start)
if not isinstance(end, datetime):
# handle single_result AsyncResults, where ar.stamp is single object,
# not a list
end = end_key(end)
return _total_seconds(end - start) | 0.006934 |
def _LoadNvmlLibrary():
"""
Load the library if it isn't loaded already
"""
global nvmlLib
if (nvmlLib is None):
# lock to ensure only one caller loads the library
libLoadLock.acquire()
try:
# ensure the library still isn't loaded
if (nvmlLib is None):
try:
if (sys.platform[:3] == "win"):
searchPaths = [
os.path.join(os.getenv("ProgramFiles", r"C:\Program Files"), r"NVIDIA Corporation\NVSMI\nvml.dll"),
os.path.join(os.getenv("WinDir", r"C:\Windows"), r"System32\nvml.dll"),
]
nvmlPath = next((x for x in searchPaths if os.path.isfile(x)), None)
if (nvmlPath == None):
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
else:
# cdecl calling convention
nvmlLib = CDLL(nvmlPath)
else:
# assume linux
nvmlLib = CDLL("libnvidia-ml.so.1")
except OSError as ose:
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
if (nvmlLib == None):
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release() | 0.004098 |
def update(self, **kwargs):
"""Creates or updates a property for the instance for each parameter."""
for key, value in kwargs.items():
setattr(self, key, value) | 0.011494 |
def long2ip(l, rfc1924=False):
"""Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
if rfc1924:
return long2rfc1924(l)
# format as one big hex value
hex_str = '%032x' % l
# split into double octet chunks without padding zeros
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
# find and remove left most longest run of zeros
dc_start, dc_len = (-1, 0)
run_start, run_len = (-1, 0)
for idx, hextet in enumerate(hextets):
if '0' == hextet:
run_len += 1
if -1 == run_start:
run_start = idx
if run_len > dc_len:
dc_len, dc_start = (run_len, run_start)
else:
run_len, run_start = (0, -1)
# end for
if dc_len > 1:
dc_end = dc_start + dc_len
if dc_end == len(hextets):
hextets += ['']
hextets[dc_start:dc_end] = ['']
if dc_start == 0:
hextets = [''] + hextets
# end if
return ':'.join(hextets) | 0.000852 |
def convert_uei(pinyin):
"""uei 转换,还原原始的韵母
iou,uei,uen前面加声母的时候,写成iu,ui,un。
例如niu(牛),gui(归),lun(论)。
"""
return UI_RE.sub(lambda m: m.group(1) + UI_MAP[m.group(2)], pinyin) | 0.005236 |
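UI_RE and UI_MAP are not shown; a minimal sketch, assuming they match an initial followed by a contracted final and map it back to the full form:
import re

UI_MAP = {'iu': 'iou', 'ui': 'uei', 'un': 'uen'}    # assumed contents
UI_RE = re.compile(r'([a-z]+?)(iu|ui|un)$')         # assumed pattern

print(UI_RE.sub(lambda m: m.group(1) + UI_MAP[m.group(2)], 'gui'))  # guei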
def _convert_choices(self, choices):
"""Auto create db values then call super method"""
final_choices = []
for choice in choices:
if isinstance(choice, ChoiceEntry):
final_choices.append(choice)
continue
original_choice = choice
if isinstance(choice, six.string_types):
if choice == _NO_SUBSET_NAME_:
continue
choice = [choice, ]
else:
choice = list(choice)
length = len(choice)
assert 1 <= length <= 4, 'Invalid number of entries in %s' % (original_choice,)
final_choice = []
# do we have attributes?
if length > 1 and isinstance(choice[-1], Mapping):
final_choice.append(choice.pop())
elif length == 4:
attributes = choice.pop()
assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (original_choice,)
if attributes:
final_choice.append(attributes)
# the constant
final_choice.insert(0, choice.pop(0))
if len(choice):
# we were given a db value
final_choice.insert(1, choice.pop(0))
if len(choice):
# we were given a display value
final_choice.insert(2, choice.pop(0))
else:
# set None to compute it later
final_choice.insert(1, None)
if final_choice[1] is None:
# no db value, we compute it from the constant
final_choice[1] = self.value_transform(final_choice[0])
final_choices.append(final_choice)
return super(AutoChoices, self)._convert_choices(final_choices) | 0.002109 |
def export(self, export_auto_config=False):
"""
Export the cluster template for the given cluster. The cluster must have host
templates defined. If the cluster does not have host templates defined, it will
export host templates based on role assignments.
@param export_auto_config: Also export auto configured configs
@return: Return cluster template
@since: API v12
"""
return self._get("export", ApiClusterTemplate, False,
params=dict(exportAutoConfig=export_auto_config), api_version=12) | 0.003697 |
def get_flagged_names():
"""Return a list of all filenames marked as flagged."""
l = []
for w in _widget_cache.values():
if w.flagged:
l.append(w.get_node().get_value())
return l | 0.009302 |
def run(self, redirects = []):
"""Runs the pipelines with the specified redirects and returns
a RunningPipeline instance."""
if not isinstance(redirects, redir.Redirects):
redirects = redir.Redirects(self._env._redirects, *redirects)
with copy.copy_session() as sess:
self = copy.deepcopy(self)
processes = self._run(redirects, sess)
pipeline = RunningPipeline(processes, self)
self._env.last_pipeline = pipeline
return pipeline | 0.007692 |
def count(self, Class, set=None, recursive=True,ignore=True):
"""See :meth:`AbstractElement.count`"""
if self.mode == Mode.MEMORY:
s = 0
for t in self.data:
s += sum( 1 for e in t.select(Class,recursive,True ) )
return s | 0.031142 |
def render_sparkline(self, **kwargs):
"""Render a sparkline"""
spark_options = dict(
width=200,
height=50,
show_dots=False,
show_legend=False,
show_x_labels=False,
show_y_labels=False,
spacing=0,
margin=5,
min_scale=1,
max_scale=2,
explicit_size=True,
no_data_text='',
js=(),
classes=(_ellipsis, 'pygal-sparkline')
)
spark_options.update(kwargs)
return self.render(**spark_options) | 0.003396 |
def pnl(self, account='', modelCode='') -> List[PnL]:
"""
List of subscribed :class:`.PnL` objects (profit and loss),
optionally filtered by account and/or modelCode.
The :class:`.PnL` objects are kept live updated.
Args:
account: If specified, filter for this account name.
modelCode: If specified, filter for this account model.
"""
return [v for v in self.wrapper.pnls.values() if
(not account or v.account == account) and
(not modelCode or v.modelCode == modelCode)] | 0.003425 |
def get_token(opts, tok):
'''
Fetch the token data from the store.
:param opts: Salt master config options
:param tok: Token value to get
:returns: Token data if successful. Empty dict if failed.
'''
t_path = os.path.join(opts['token_dir'], tok)
if not os.path.isfile(t_path):
return {}
serial = salt.payload.Serial(opts)
try:
with salt.utils.files.fopen(t_path, 'rb') as fp_:
tdata = serial.loads(fp_.read())
return tdata
except (IOError, OSError):
log.warning(
'Authentication failure: can not read token file "%s".', t_path)
return {} | 0.001538 |
def decoder(self, response: bytes):
"""编码请求为bytes.
检查是否使用debug模式和是否对数据进行压缩.之后根据状态将python字典形式的请求编码为字节串.
Parameters:
response (bytes): - 响应的字节串编码
Return:
(Dict[str, Any]): - python字典形式的响应
"""
response = response[:-(len(self.SEPARATOR))]
if self.compreser is not None:
response = self.compreser.decompress(response)
if self.debug is True:
response = json.loads(response.decode('utf-8'))
else:
response = msgpack.unpackb(response, encoding='utf-8')
version = response.get("MPRPC")
if version and version == self.VERSION:
return response
else:
raise ProtocolException("Wrong Protocol") | 0.002611 |
def list_subscriptions(self, target_id=None, ids=None, query_flags=None):
"""ListSubscriptions.
[Preview API]
:param str target_id:
:param [str] ids:
:param str query_flags:
:rtype: [NotificationSubscription]
"""
query_parameters = {}
if target_id is not None:
query_parameters['targetId'] = self._serialize.query('target_id', target_id, 'str')
if ids is not None:
ids = ",".join(ids)
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if query_flags is not None:
query_parameters['queryFlags'] = self._serialize.query('query_flags', query_flags, 'str')
response = self._send(http_method='GET',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[NotificationSubscription]', self._unwrap_collection(response)) | 0.005693 |
def shell_split(text):
"""
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
    function (see standard library `shlex`) except that it supports
unicode strings (shlex does not support unicode until Python 2.7.3).
"""
assert is_text_string(text) # in case a QString is passed...
pattern = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
out = []
for token in re.split(pattern, text):
if token.strip():
out.append(token.strip('"').strip("'"))
return out | 0.001466 |
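A short usage sketch of the contract described in the docstring above, assuming `shell_split` from the snippet is in scope; quoted substrings containing spaces survive as single tokens.

```python
# Illustrative usage only.
print(shell_split('-i file.txt -o "my output file.txt"'))
# ['-i', 'file.txt', '-o', 'my output file.txt']
```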
def full_y(self, Y):
"""Add self(shunt) into full Jacobian Y"""
if not self.n:
return
Ysh = matrix(self.g,
(self.n, 1), 'd') + 1j * matrix(self.b, (self.n, 1), 'd')
uYsh = mul(self.u, Ysh)
Y += spmatrix(uYsh, self.a, self.a, Y.size, 'z') | 0.006452 |
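Restated as an equation, the snippet adds, for each shunt device k attached to bus a_k with connection status u_k, a diagonal admittance term (this is simply the code above in math form):

```latex
Y_{a_k a_k} \mathrel{+}= u_k \left( g_k + j\, b_k \right)
```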
def joined(self, a, b):
"""
Returns True if a and b are members of the same set.
"""
mapping = self._mapping
try:
return mapping[a] is mapping[b]
except KeyError:
return False | 0.008097 |
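The `joined` check above relies on every member of a group mapping to the same shared container, so an identity comparison answers the membership question. A minimal module-level sketch of that layout follows; the `join` helper and the names are hypothetical, not the library's actual class.

```python
# Hypothetical sketch of the shared-mapping idea behind `joined`.
mapping = {}

def join(a, b):
    """Merge the groups containing a and b, creating them if needed."""
    group_a = mapping.setdefault(a, {a})
    group_b = mapping.setdefault(b, {b})
    if group_a is not group_b:
        group_a |= group_b           # fold b's group into a's
        for member in group_b:
            mapping[member] = group_a

def joined(a, b):
    """True if a and b are members of the same set."""
    try:
        return mapping[a] is mapping[b]
    except KeyError:
        return False

join('x', 'y')
print(joined('x', 'y'), joined('x', 'z'))  # True False
```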
def xml(self):
"""
xml representation of the metadata.
:return: xml representation of the metadata
:rtype: ElementTree.Element
"""
tree = ElementTree.parse(METADATA_XML_TEMPLATE)
root = tree.getroot()
for name, prop in list(self.properties.items()):
path = prop.xml_path
elem = root.find(path, XML_NS)
if elem is None:
# create elem
elem = insert_xml_element(root, path)
elem.text = self.get_xml_value(name)
return root | 0.003484 |
def add_dispatcher(self, dsp, inputs, outputs, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, **kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
            This can be any function that takes a dictionary with the inputs
            of the sub-dispatcher node and returns True if the input values
            satisfy the domain, otherwise False.
            .. note:: This function is invoked every time a data node reaches
               the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
            If True the Dispatcher waits for all input results before executing
            the `input_domain` function. If a number is defined this is used as
            `timeout` for the `Future.result` method [default: True]. Note this
            is used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
"""
from .utils.blue import _init
dsp = _init(dsp)
if not isinstance(dsp, self.__class__):
kw = dsp
dsp = self.__class__(
name=dsp_id or 'unknown',
executor=self.executor
)
dsp.add_from_lists(**kw)
if not dsp_id: # Get the dsp id.
dsp_id = dsp.name or 'unknown'
if description is None: # Get description.
description = dsp.__doc__ or None
if not isinstance(inputs, dict): # Create the inputs dict.
inputs = kk_dict(*inputs)
if not isinstance(outputs, dict): # Create the outputs dict.
outputs = kk_dict(*outputs)
# Set zero as default input distances.
# noinspection PyTypeChecker
_weight_from = dict.fromkeys(inputs.keys(), 0.0)
_weight_from.update(inp_weight or {})
from .utils.alg import _nodes
# Return dispatcher node id.
dsp_id = self.add_function(
dsp_id, dsp, sorted(_nodes(inputs)),
sorted(_nodes(outputs.values())), input_domain, weight,
_weight_from, type='dispatcher', description=description,
wait_inputs=False, await_domain=await_domain, **kwargs
)
# Set proper inputs.
self.nodes[dsp_id]['inputs'] = inputs
# Set proper outputs.
self.nodes[dsp_id]['outputs'] = outputs
if SINK not in dsp.nodes and \
SINK in _nodes(inputs.values()).union(_nodes(outputs)):
dsp.add_data(SINK) # Add sink node.
# Import default values from sub-dispatcher.
if include_defaults:
dsp_dfl = dsp.default_values # Namespace shortcut.
remove = set() # Set of nodes to remove after the import.
# Set default values.
for k, v in inputs.items():
if isinstance(v, str):
if v in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v))
else:
if v[0] in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v[0]))
remove.update(v[1:])
# Remove default values.
for k in remove:
dsp_dfl.pop(k, None)
return dsp_id | 0.001517 |
def remove_origin(self, account_id, origin_id):
"""Removes an origin pull mapping with the given origin pull ID.
:param int account_id: the CDN account ID from which the mapping should
be deleted.
:param int origin_id: the origin pull mapping ID to delete.
"""
return self.account.deleteOriginPullRule(origin_id, id=account_id) | 0.005 |
def syscall_clearremovequeue(queue, index):
'''
Clear the subqueue `queue[index]` and remove it from queue.
'''
def _syscall(scheduler, processor):
qes, qees = queue[index].clear()
events = scheduler.queue.unblockqueue(queue[index])
for e in events:
scheduler.eventtree.remove(e)
qes2, qees2 = queue.removeSubQueue(index)
for e in qes:
processor(e)
for e in qes2:
processor(e)
for e in qees:
processor(e)
for e in qees2:
processor(e)
return _syscall | 0.001686 |
def create(self, project_id=None):
"""Creates the bucket.
Args:
project_id: the project in which to create the bucket.
Returns:
The bucket.
Raises:
Exception if there was an error creating the bucket.
"""
if not self.exists():
if project_id is None:
project_id = self._api.project_id
try:
self._info = self._api.buckets_insert(self._name, project_id=project_id)
except Exception as e:
raise e
return self | 0.010142 |