def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
def calc_el_lz_v1(self):
"""Calculate lake evaporation.
Required control parameters:
|NmbZones|
|ZoneType|
|TTIce|
Required derived parameters:
|RelZoneArea|
Required fluxes sequences:
|TC|
|EPC|
Updated state sequence:
|LZ|
Basic equations:
:math:`\\frac{dLZ}{dt} = -EL` \n
:math:`EL = \\Bigl \\lbrace
{
{EPC \\ | \\ TC > TTIce}
\\atop
{0 \\ | \\ TC \\leq TTIce}
}`
Examples:
Six zones of the same size are initialized. The first three
zones are not internal lakes, so they cannot exhibit any lake
evaporation. Of the last three zones, which are internal lakes,
only the last one evaporates water. For zones five and six,
evaporation is suppressed due to an assumed ice layer, whenever
the associated threshold temperature is not exceeded:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(6)
>>> zonetype(FIELD, FOREST, GLACIER, ILAKE, ILAKE, ILAKE)
>>> ttice(-1.0)
>>> derived.relzonearea = 1.0/6.0
>>> fluxes.epc = 0.6
>>> fluxes.tc = 0.0, 0.0, 0.0, 0.0, -1.0, -2.0
>>> states.lz = 10.0
>>> model.calc_el_lz_v1()
>>> fluxes.el
el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0)
>>> states.lz
lz(9.9)
Note that internal lakes always contain water. Hence, the
HydPy-H-Land model allows for negative values of the lower
zone storage:
>>> states.lz = 0.05
>>> model.calc_el_lz_v1()
>>> fluxes.el
el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0)
>>> states.lz
lz(-0.05)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nmbzones):
if (con.zonetype[k] == ILAKE) and (flu.tc[k] > con.ttice[k]):
flu.el[k] = flu.epc[k]
sta.lz -= der.relzonearea[k]*flu.el[k]
else:
flu.el[k] = 0.
def QA_util_if_tradetime(
_time=datetime.datetime.now(),
market=MARKET_TYPE.STOCK_CN,
code=None
):
'Check whether the given time falls within trading hours'
_time = datetime.datetime.strptime(str(_time)[0:19], '%Y-%m-%d %H:%M:%S')
if market is MARKET_TYPE.STOCK_CN:
if QA_util_if_trade(str(_time.date())[0:10]):
if _time.hour in [10, 13, 14]:
return True
elif _time.hour in [
9
] and _time.minute >= 15: # changed to 9:15 to include the 9:15-9:30 pre-open call auction
return True
elif _time.hour in [11] and _time.minute <= 30:
return True
else:
return False
else:
return False
elif market is MARKET_TYPE.FUTURE_CN:
date_today = str(_time.date())
date_yesterday = str((_time - datetime.timedelta(days=1)).date())
is_today_open = QA_util_if_trade(date_today)
is_yesterday_open = QA_util_if_trade(date_yesterday)
# handle futures night sessions that run into Saturday/Sunday
if not is_today_open: # may be Saturday or Sunday
if not is_yesterday_open or (_time.hour > 2 or _time.hour == 2 and _time.minute > 30):
return False
shortName = "" # i , p
for i in range(len(code)):
ch = code[i]
if ch.isdigit(): # ch >= 48 and ch <= 57:
break
shortName += code[i].upper()
period = [
[9, 0, 10, 15],
[10, 30, 11, 30],
[13, 30, 15, 0]
]
if (shortName in ["IH", 'IF', 'IC']):
period = [
[9, 30, 11, 30],
[13, 0, 15, 0]
]
elif (shortName in ["T", "TF"]):
period = [
[9, 15, 11, 30],
[13, 0, 15, 15]
]
if 0 <= _time.weekday() <= 4:
for i in range(len(period)):
p = period[i]
if ((_time.hour > p[0] or (_time.hour == p[0] and _time.minute >= p[1])) and (_time.hour < p[2] or (_time.hour == p[2] and _time.minute < p[3]))):
return True
# latest night-session schedule (2019-03-29)
nperiod = [
[
['AU', 'AG', 'SC'],
[21, 0, 2, 30]
],
[
['CU', 'AL', 'ZN', 'PB', 'SN', 'NI'],
[21, 0, 1, 0]
],
[
['RU', 'RB', 'HC', 'BU','FU','SP'],
[21, 0, 23, 0]
],
[
['A', 'B', 'Y', 'M', 'JM', 'J', 'P', 'I', 'L', 'V', 'PP', 'EG', 'C', 'CS'],
[21, 0, 23, 0]
],
[
['SR', 'CF', 'RM', 'MA', 'TA', 'ZC', 'FG', 'IO', 'CY'],
[21, 0, 23, 30]
],
]
for i in range(len(nperiod)):
for j in range(len(nperiod[i][0])):
if nperiod[i][0][j] == shortName:
p = nperiod[i][1]
condA = _time.hour > p[0] or (_time.hour == p[0] and _time.minute >= p[1])
condB = _time.hour < p[2] or (_time.hour == p[2] and _time.minute < p[3])
# in one day
if p[2] >= p[0]:
if ((_time.weekday() >= 0 and _time.weekday() <= 4) and condA and condB):
return True
else:
if (((_time.weekday() >= 0 and _time.weekday() <= 4) and condA) or ((_time.weekday() >= 1 and _time.weekday() <= 5) and condB)):
return True
return False
return False
def extractall(archive, filename, dstdir):
""" extract zip or tar content to dstdir"""
if zipfile.is_zipfile(archive):
z = zipfile.ZipFile(archive)
for name in z.namelist():
targetname = name
# directory names end with '/' (on Windows as well)
if targetname.endswith('/'):
targetname = targetname[:-1]
# don't include leading "/" from file name if present
if targetname.startswith(os.path.sep):
targetname = os.path.join(dstdir, targetname[1:])
else:
targetname = os.path.join(dstdir, targetname)
targetname = os.path.normpath(targetname)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetname)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
# directory names end with '/' (on Windows as well)
if not name.endswith('/'):
# copy the file contents
with open(targetname, 'wb') as target:
target.write(z.read(name))
elif tarfile.is_tarfile(archive):
tar = tarfile.open(archive)
tar.extractall(path=dstdir)
else:
# seems to be a single file, save it
shutil.copyfile(archive, os.path.join(dstdir, filename))
def properties_operator(cls, name):
"""Wraps a container operator to ensure container class is maintained"""
def wrapper(self, *args, **kwargs):
"""Perform operation and cast to container class"""
output = getattr(super(cls, self), name)(*args, **kwargs)
return cls(output)
wrapped = getattr(cls, name)
wrapper.__name__ = wrapped.__name__
wrapper.__doc__ = wrapped.__doc__
return wrapper
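# A minimal usage sketch of the wrapper above (the `Properties` container is
# hypothetical, not from the source): rebinding `__add__` so that
# concatenation returns the container class instead of a plain list.
class Properties(list):
    pass

Properties.__add__ = properties_operator(Properties, '__add__')

combined = Properties([1, 2]) + Properties([3])
print(type(combined).__name__)  # Properties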
def _setup_stats_plugins(self):
'''
Sets up the plugin stats collectors
'''
self.stats_dict['plugins'] = {}
for key in self.plugins_dict:
plugin_name = self.plugins_dict[key]['instance'].__class__.__name__
temp_key = 'stats:redis-monitor:{p}'.format(p=plugin_name)
self.stats_dict['plugins'][plugin_name] = {}
for item in self.settings['STATS_TIMES']:
try:
time = getattr(StatsCollector, item)
self.stats_dict['plugins'][plugin_name][time] = StatsCollector \
.get_rolling_time_window(
redis_conn=self.redis_conn,
key='{k}:{t}'.format(k=temp_key, t=time),
window=time,
cycle_time=self.settings['STATS_CYCLE'])
self.logger.debug("Set up {p} plugin Stats Collector '{i}'"\
.format(p=plugin_name, i=item))
except AttributeError as e:
self.logger.warning("Unable to find Stats Time '{s}'"\
.format(s=item))
total = StatsCollector.get_hll_counter(redis_conn=self.redis_conn,
key='{k}:lifetime'.format(k=temp_key),
cycle_time=self.settings['STATS_CYCLE'],
roll=False)
self.logger.debug("Set up {p} plugin Stats Collector 'lifetime'"\
.format(p=plugin_name))
self.stats_dict['plugins'][plugin_name]['lifetime'] = total
def with_wait_cursor(func):
"""
Show a wait cursor while the wrapped function is running. The cursor is
restored as soon as the function exits.
:param func: wrapped function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
QApplication.setOverrideCursor(
QCursor(Qt.WaitCursor))
try:
ret_val = func(*args, **kwargs)
finally:
QApplication.restoreOverrideCursor()
return ret_val
return wrapper
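# Usage sketch (assumes a running Qt application; the function below is
# hypothetical and not part of the original source). The try/finally in the
# decorator guarantees the cursor is restored even if the wrapped call raises.
@with_wait_cursor
def load_big_file(path):
    """Read a potentially large file while the wait cursor is shown."""
    with open(path) as handle:
        return handle.read()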
def start_raylet(redis_address,
node_ip_address,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
num_cpus=None,
num_gpus=None,
resources=None,
object_manager_port=None,
node_manager_port=None,
redis_password=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
num_cpus: The CPUs allocated for this raylet.
num_gpus: The GPUs allocated for this raylet.
resources: The custom resources allocated for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
node_manager_port: The port to use for the node manager. If this is
None, then the node manager will choose its own port.
redis_password: The password to use when connecting to Redis.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
include_java (bool): If True, the raylet backend can also support
Java worker.
java_worker_options (str): The command options for Java worker.
Returns:
ProcessInfo for the process that was started.
"""
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
if use_valgrind and use_profiler:
raise Exception("Cannot use valgrind and profiler at the same time.")
num_initial_workers = (num_cpus if num_cpus is not None else
multiprocessing.cpu_count())
static_resources = check_and_update_resources(num_cpus, num_gpus,
resources)
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static))
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()])
gcs_ip_address, gcs_port = redis_address.split(":")
if include_java is True:
java_worker_options = (java_worker_options
or DEFAULT_JAVA_WORKER_OPTIONS)
java_worker_command = build_java_worker_command(
java_worker_options,
redis_address,
plasma_store_name,
raylet_name,
redis_password,
os.path.join(temp_dir, "sockets"),
)
else:
java_worker_command = ""
# Create the command that the Raylet will use to start workers.
start_worker_command = ("{} {} "
"--node-ip-address={} "
"--object-store-name={} "
"--raylet-name={} "
"--redis-address={} "
"--temp-dir={}".format(
sys.executable, worker_path, node_ip_address,
plasma_store_name, raylet_name, redis_address,
temp_dir))
if redis_password:
start_worker_command += " --redis-password {}".format(redis_password)
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
# If the node manager port is None, then use 0 to cause the node manager
# to choose its own port.
if node_manager_port is None:
node_manager_port = 0
if load_code_from_local:
start_worker_command += " --load-code-from-local "
command = [
RAYLET_EXECUTABLE,
"--raylet_socket_name={}".format(raylet_name),
"--store_socket_name={}".format(plasma_store_name),
"--object_manager_port={}".format(object_manager_port),
"--node_manager_port={}".format(node_manager_port),
"--node_ip_address={}".format(node_ip_address),
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--num_initial_workers={}".format(num_initial_workers),
"--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
"--static_resource_list={}".format(resource_argument),
"--config_list={}".format(config_str),
"--python_worker_command={}".format(start_worker_command),
"--java_worker_command={}".format(java_worker_command),
"--redis_password={}".format(redis_password or ""),
"--temp_dir={}".format(temp_dir),
]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info
def get_temperature(self):
"""Get current temperature in celsius."""
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch()
def describe_consumer_groups(self, group_ids, group_coordinator_id=None):
"""Describe a set of consumer groups.
Any errors are immediately raised.
:param group_ids: A list of consumer group IDs. These are typically the
group names as strings.
:param group_coordinator_id: The node_id of the groups' coordinator
broker. If set to None, it will query the cluster for each group to
find that group's coordinator. Explicitly specifying this can be
useful for avoiding extra network round trips if you already know
the group coordinator. This is only useful when all the group_ids
have the same coordinator, otherwise it will error. Default: None.
:return: A list of group descriptions. For now the group descriptions
are the raw results from the DescribeGroupsResponse. Long-term, we
plan to change this to return namedtuples as well as decoding the
partition assignments.
"""
group_descriptions = []
version = self._matching_api_version(DescribeGroupsRequest)
for group_id in group_ids:
if group_coordinator_id is not None:
this_groups_coordinator_id = group_coordinator_id
else:
this_groups_coordinator_id = self._find_group_coordinator_id(group_id)
if version <= 1:
# Note: KAFKA-6788 A potential optimization is to group the
# request per coordinator and send one request with a list of
# all consumer groups. Java still hasn't implemented this
# because the error checking is hard to get right when some
# groups error and others don't.
request = DescribeGroupsRequest[version](groups=(group_id,))
response = self._send_request_to_node(this_groups_coordinator_id, request)
assert len(response.groups) == 1
# TODO need to implement converting the response tuple into
# a more accessible interface like a namedtuple and then stop
# hardcoding tuple indices here. Several Java examples,
# including KafkaAdminClient.java
group_description = response.groups[0]
error_code = group_description[0]
error_type = Errors.for_code(error_code)
# Java has the note: KAFKA-6789, we can retry based on the error code
if error_type is not Errors.NoError:
raise error_type(
"Request '{}' failed with response '{}'."
.format(request, response))
# TODO Java checks the group protocol type, and if consumer
# (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
# the members' partition assignments... that hasn't yet been
# implemented here so just return the raw struct results
group_descriptions.append(group_description)
else:
raise NotImplementedError(
"Support for DescribeGroups v{} has not yet been added to KafkaAdminClient."
.format(version))
return group_descriptions
def checkformat(self):
"""************************************************************************************************************************************************************
Task: checks the format of the bed file. The only requirements checked are that each line presents at least 3 tab separated columns, the
two on the right must present integer values indicating the start/end position respectively. Right value must be greater than the
left value.
Outputs:
err: string containing the detected error. Empty string in case of a correct format.
************************************************************************************************************************************************************"""
fd = open_gzipsafe(self.filename)
line = fd.readline()
while line.startswith('#'):
line = fd.readline()
fields = line.split('\t')
lc = 1
error = ''
# Checks that the two columns on the right contain integer values
try:
# Parses each line and checks that there are at least 3 fields, the two on the right containing integer values and being the right one
# greater than the left one
while line != '' and len(fields) > 2 and int(fields[1]) <= int(fields[2]):
lc += 1
line = fd.readline()
fields = line.split('\t')
except ValueError:
error += 'Incorrect start/end values at line ' + str(lc) + '\n'
error += 'Start/End coordinates must be indicated with integer values. The right value must be greater than the left value.\n'
error += 'Line found: ' + line
fd.close()
return error
# If it get to this point means that either the file ended or there is a line with less than 3 fields
if line != '':
error += 'Incorrect line format at line ' + str(lc) + '\n'
error += 'At least three columns are expected in each line\n'
error += 'The right value must be greater than the left value.\n'
error += 'Line found: ' + line
fd.close()
return error
def get_executor(self, create=1):
"""Fetch the action executor for this node. Create one if
there isn't already one, and requested to do so."""
try:
executor = self.executor
except AttributeError:
if not create:
raise
try:
act = self.builder.action
except AttributeError:
executor = SCons.Executor.Null(targets=[self])
else:
executor = SCons.Executor.Executor(act,
self.env or self.builder.env,
[self.builder.overrides],
[self],
self.sources)
self.executor = executor
return executor
def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
"""Validate an RRset against a single signature rdata
The owner name of the rrsig is assumed to be the same as the owner name
of the rrset.
@param rrset: The RRset to validate
@type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
tuple
@param rrsig: The signature rdata
@type rrsig: dns.rrset.Rdata
@param keys: The key dictionary.
@type keys: a dictionary keyed by dns.name.Name with node or rdataset values
@param origin: The origin to use for relative names
@type origin: dns.name.Name or None
@param now: The time to use when validating the signatures. The default
is the current time.
@type now: int
"""
if isinstance(origin, (str, unicode)):
origin = dns.name.from_text(origin, dns.name.root)
key = _find_key(keys, rrsig)
if not key:
raise ValidationFailure, 'unknown key'
# For convenience, allow the rrset to be specified as a (name, rdataset)
# tuple as well as a proper rrset
if isinstance(rrset, tuple):
rrname = rrset[0]
rdataset = rrset[1]
else:
rrname = rrset.name
rdataset = rrset
if now is None:
now = time.time()
if rrsig.expiration < now:
raise ValidationFailure, 'expired'
if rrsig.inception > now:
raise ValidationFailure, 'not yet valid'
hash = _make_hash(rrsig.algorithm)
if _is_rsa(rrsig.algorithm):
keyptr = key.key
(bytes,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
if bytes == 0:
(bytes,) = struct.unpack('!H', keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes]
rsa_n = keyptr[bytes:]
keylen = len(rsa_n) * 8
pubkey = Crypto.PublicKey.RSA.construct(
(Crypto.Util.number.bytes_to_long(rsa_n),
Crypto.Util.number.bytes_to_long(rsa_e)))
sig = (Crypto.Util.number.bytes_to_long(rrsig.signature),)
elif _is_dsa(rrsig.algorithm):
keyptr = key.key
(t,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
octets = 64 + t * 8
dsa_q = keyptr[0:20]
keyptr = keyptr[20:]
dsa_p = keyptr[0:octets]
keyptr = keyptr[octets:]
dsa_g = keyptr[0:octets]
keyptr = keyptr[octets:]
dsa_y = keyptr[0:octets]
pubkey = Crypto.PublicKey.DSA.construct(
(Crypto.Util.number.bytes_to_long(dsa_y),
Crypto.Util.number.bytes_to_long(dsa_g),
Crypto.Util.number.bytes_to_long(dsa_p),
Crypto.Util.number.bytes_to_long(dsa_q)))
(dsa_r, dsa_s) = struct.unpack('!20s20s', rrsig.signature[1:])
sig = (Crypto.Util.number.bytes_to_long(dsa_r),
Crypto.Util.number.bytes_to_long(dsa_s))
else:
raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
hash.update(_to_rdata(rrsig, origin)[:18])
hash.update(rrsig.signer.to_digestable(origin))
if rrsig.labels < len(rrname) - 1:
suffix = rrname.split(rrsig.labels + 1)[1]
rrname = dns.name.from_text('*', suffix)
rrnamebuf = rrname.to_digestable(origin)
rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
rrsig.original_ttl)
rrlist = sorted(rdataset)
for rr in rrlist:
hash.update(rrnamebuf)
hash.update(rrfixed)
rrdata = rr.to_digestable(origin)
rrlen = struct.pack('!H', len(rrdata))
hash.update(rrlen)
hash.update(rrdata)
digest = hash.digest()
if _is_rsa(rrsig.algorithm):
# PKCS1 algorithm identifier goop
digest = _make_algorithm_id(rrsig.algorithm) + digest
padlen = keylen // 8 - len(digest) - 3
digest = chr(0) + chr(1) + chr(0xFF) * padlen + chr(0) + digest
elif _is_dsa(rrsig.algorithm):
pass
else:
# Raise here for code clarity; this won't actually ever happen
# since if the algorithm is really unknown we'd already have
# raised an exception above
raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
if not pubkey.verify(digest, sig):
raise ValidationFailure, 'verify failure'
def _get_data(filenames):
"""Read data from file(s) or STDIN.
Args:
filenames (list): List of files to read to get data. If empty or
None, read from STDIN.
"""
if filenames:
data = ""
for filename in filenames:
with open(filename, "rb") as f:
data += f.read()
else:
data = sys.stdin.read()
return data
def create_normal_matrix(self, modelview):
"""
Creates a normal matrix from modelview matrix
Args:
modelview: The modelview matrix
Returns:
A 3x3 Normal matrix as a :py:class:`numpy.array`
"""
normal_m = Matrix33.from_matrix44(modelview)
normal_m = normal_m.inverse
normal_m = normal_m.transpose()
return normal_m
def get_collection(self, path):
"""To get pagewise data."""
while True:
items = self.get(path)
req = self.req
for item in items:
yield item
if req.links and 'next' in req.links and\
req.links['next']['rel'] == 'next':
path = req.links['next']['url']
else:
break
def bind(self, func: Callable[[Any], IO]) -> IO:
"""IO a -> (a -> IO b) -> IO b"""
g = self._value
return Get(lambda text: g(text).bind(func))
def access_SUSY_dataset_format_file(filename):
"""
This function accesses a CSV file containing data of the form of the [SUSY
dataset](https://archive.ics.uci.edu/ml/datasets/SUSY), i.e. with the first
column being class labels and other columns being features.
"""
# Load the CSV file to a list.
with open(filename, "rb") as dataset_file:
dataset_CSV = [row for row in csv.reader(dataset_file, delimiter = ",")]
# Reorganise the data.
return [
i for i in itertools.chain(*[list((element[1:],
[int(float(element[0]))])) for element in dataset_CSV])
]
def _parse_samples(self, io_bytes):
"""
_parse_samples: binary data in XBee IO data format ->
[ {"dio-0":True,
"dio-1":False,
"adc-0":100"}, ...]
_parse_samples reads binary data from an XBee device in the IO
data format specified by the API. It will then return a
dictionary indicating the status of each enabled IO port.
"""
sample_count, dio_chans, aio_chans, dio_mask, header_size = \
self._parse_samples_header(io_bytes)
samples = []
# split the sample data into a list, so it can be pop()'d
sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]]
# repeat for every sample provided
for sample_ind in range(0, sample_count):
tmp_samples = {}
if dio_chans:
# we have digital data
digital_data_set = (sample_bytes.pop(0) << 8 |
sample_bytes.pop(0))
digital_values = dio_mask & digital_data_set
for i in dio_chans:
tmp_samples['dio-{0}'.format(i)] = True \
if (digital_values >> i) & 1 else False
for i in aio_chans:
analog_sample = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
tmp_samples['adc-{0}'.format(i)] = analog_sample
samples.append(tmp_samples)
return samples
def _get_parser(self, env):
""" Creates base argument parser.
`env`
Runtime ``Environment`` instance.
* Raises ``HelpBanner`` exception when certain conditions apply.
Returns ``FocusArgumentParser`` object.
"""
version_str = 'focus version ' + __version__
usage_str = 'focus [-h] [-v] [--no-color] <command> [<args>]'
# setup parser
parser = FocusArgParser(description=("Command-line productivity tool "
"for improved task workflows."),
epilog=("See 'focus help <command>' for more "
"information on a specific command."),
usage=usage_str)
parser.add_argument('-v', '--version', action='version',
version=version_str)
parser.add_argument('--no-color', action='store_true',
help='disables colors')
# fetch command plugins
commands = []
active = env.task.active
command_hooks = registration.get_registered(command_hooks=True,
task_active=active)
# extract command name and docstrings as help text
for plugin in command_hooks:
help_text = (plugin.__doc__ or '').strip().rstrip('.').lower()
commands.append((plugin.command, help_text))
commands.sort(key=lambda x: x[0]) # command ordered
# install subparsers
subparsers = parser.add_subparsers(title='available commands')
# install 'help' subparser
help_parser = subparsers.add_parser('help', add_help=False)
help_parser.set_defaults(func=self._handle_help)
# install 'version' subparser
version_parser = subparsers.add_parser('version', add_help=False)
def _print_version(env, args):
env.io.write(version_str)
return True
version_parser.set_defaults(func=_print_version)
# install command subparsers based on registered command plugins.
# this allows for focus commands (e.g. focus on [...])
for command, help_ in commands:
cmd_parser = subparsers.add_parser(command, help=help_,
add_help=False)
# use wrapper to bind command value and passthru to _handle_command
# when executed later
def _run(command):
def _wrapper(env, args):
return self._handle_command(command, env, args)
return _wrapper
cmd_parser.set_defaults(func=_run(command))
return parser
def scan(self, M):
"""
LML, fixed-effect sizes, and scale of the candidate set.
Parameters
----------
M : array_like
Fixed-effects set.
Returns
-------
lml : float
Log of the marginal likelihood.
effsizes0 : ndarray
Covariates fixed-effect sizes.
effsizes0_se : ndarray
Covariates fixed-effect size standard errors.
effsizes1 : ndarray
Candidate set fixed-effect sizes.
effsizes1_se : ndarray
Candidate fixed-effect size standard errors.
scale : ndarray
Optimal scale.
"""
from numpy_sugar.linalg import ddot
from numpy_sugar import is_all_finite
M = asarray(M, float)
if M.shape[1] == 0:
return {
"lml": self.null_lml(),
"effsizes0": self.null_beta,
"effsizes0_se": self.null_beta_se,
"effsizes1": empty((0)),
"effsizes1_se": empty((0)),
"scale": self.null_scale,
}
if not is_all_finite(M):
raise ValueError("M parameter has non-finite elements.")
MTQ = [dot(M.T, Q) for Q in self._QS[0] if Q.size > 0]
yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)]
XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)]
D = self._D
MTBM = [ddot(i, 1 / j) @ i.T for i, j in zip(MTQ, D) if j.min() > 0]
return self._multicovariate_set(yTBM, XTBM, MTBM)
def get_matching_multiplex_port(self,name):
"""
Given a name, figure out if a multiplex port prefixes this name and return it. Otherwise return none.
"""
# short circuit: if the attribute name already exists return none
# if name in self._portnames: return None
# if not len([p for p in self._portnames if name.startswith(p) and name != p]): return None
matching_multiplex_ports = [self.__getattribute__(p) for p in self._portnames
if name.startswith(p)
and name != p
and hasattr(self, p)
and self.__getattribute__(p).is_multiplex
]
for port in matching_multiplex_ports:
return port
return None
def port_profile_vlan_profile_switchport_mode_vlan_mode(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
vlan_profile = ET.SubElement(port_profile, "vlan-profile")
switchport = ET.SubElement(vlan_profile, "switchport")
mode = ET.SubElement(switchport, "mode")
vlan_mode = ET.SubElement(mode, "vlan-mode")
vlan_mode.text = kwargs.pop('vlan_mode')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def verify_token_string(self,
token_string,
action=None,
timeout=None,
current_time=None):
"""Verify the given token string against the contents of this XSRFToken.
:param token_string:
A string containing the hashed token (generated by
`generate_token_string`).
:param action:
A string containing the action that is being verified.
:param timeout:
An int or float representing the number of seconds that the token
is valid for. If None then tokens are valid forever.
:current_time:
An int representing the number of seconds since the epoch. Will be
used by to check for token expiry if `timeout` is set. If `None`
then the current time will be used.
:raises:
XSRFTokenMalformed if the given token_string cannot be parsed.
XSRFTokenExpiredException if the given token string is expired.
XSRFTokenInvalid if the given token string does not match the
contents of the `XSRFToken`.
"""
try:
decoded_token_string = base64.urlsafe_b64decode(token_string)
except TypeError:
raise XSRFTokenMalformed()
split_token = decoded_token_string.split(self._DELIMITER)
if len(split_token) != 2:
raise XSRFTokenMalformed()
try:
token_time = int(split_token[1])
except ValueError:
raise XSRFTokenMalformed()
if timeout is not None:
if current_time is None:
current_time = time.time()
# If an attacker modifies the plain text time then it will not match
# the hashed time so this check is sufficient.
if (token_time + timeout) < current_time:
raise XSRFTokenExpiredException()
expected_token = XSRFToken(self.user_id, self.secret, token_time)
expected_token_string = expected_token.generate_token_string(action)
if len(expected_token_string) != len(token_string):
raise XSRFTokenInvalid()
# Compare the two strings in constant time to prevent timing attacks.
different = 0
for a, b in zip(token_string, expected_token_string):
different |= ord(a) ^ ord(b)
if different:
raise XSRFTokenInvalid()
def get_file_extension(filepath):
"""
Copy if anyconfig.utils.get_file_extension is not available.
>>> get_file_extension("/a/b/c")
''
>>> get_file_extension("/a/b.txt")
'txt'
>>> get_file_extension("/a/b/c.tar.xz")
'xz'
"""
_ext = os.path.splitext(filepath)[-1]
if _ext:
return _ext[1:] if _ext.startswith('.') else _ext
return ''
def get_app_template_dir(app_name):
"""Get the template directory for an application
Uses apps interface available in django 1.7+
Returns a full path, or None if the app was not found.
"""
if app_name in _cache:
return _cache[app_name]
template_dir = None
for app in apps.get_app_configs():
if app.label == app_name:
template_dir = join(app.path, 'templates')
break
_cache[app_name] = template_dir
return template_dir
def render_value(self, value, **options):
"""Render value"""
renderer = self.renderers.get(type(value), lambda value, **options: value)
return renderer(value, **options)
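# A standalone sketch of the same dispatch-by-type idea (the renderer table
# below is illustrative, not taken from the source): unknown types fall back
# to an identity renderer.
renderers = {
    float: lambda value, **options: format(value, '.{}f'.format(options.get('precision', 2))),
    bool: lambda value, **options: 'yes' if value else 'no',
}

def render(value, **options):
    # Look up a renderer by the value's exact type, defaulting to identity.
    return renderers.get(type(value), lambda value, **options: value)(value, **options)

print(render(3.14159, precision=3))  # 3.142
print(render('unchanged'))           # unchanged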
def _repeat(self, index, stage, stop):
""" Repeat a stage.
:param index: Stage index.
:param stage: Stage object to repeat.
:param stop: Event used to abort the repetition.
Note: 'iterations' (number of repetitions, default infinite) and
'stages' (number of stages back to repeat, default 1) are read from
stage.kwargs.
"""
times = None
if 'iterations' in stage.kwargs:
times = stage.kwargs['iterations'] - 1
stages_back = 1
if 'stages' in stage.kwargs:
stages_back = stage.kwargs['stages']
i = 0
while i != times:
if stop.is_set():
break
for forward in range(stages_back):
if stop.is_set():
break
stage_index = index - stages_back + forward
self._execute_stage(stage_index, self._pipe[stage_index], stop)
i += 1
def cache_func(func, duration=conf.GOSCALE_CACHE_DURATION, cache_key=None):
"""Django cache decorator for functions
Basic ideas got from:
- http://djangosnippets.org/snippets/492/
- http://djangosnippets.org/snippets/564/
Example usage:
Example 1:
- providing a cache key and a duration
class MenuItem(models.Model):
@classmethod
@cache_func(3600*24, 'menu_root')
def get_root(self):
return MenuItem.objects.get(pk=1)
Example 2:
- providing a dynamic cache key and a duration
@cache_func(3600, lambda u: 'user_privileges_%s' % u.username,)
def get_user_privileges(user):
#...
Example 3:
- providing only a duration of the cache, if no cache is provided it will
be auto-generated
@cache_func(1800)
def get_page_by_absolute_url(url):
#...
"""
def do_cache(*args, **kwargs):
alternative_cache_key = '%s.%s' % (func.__module__, func.__name__)
key = get_cache_key(cache_key, alternative_cache_key, *args, **kwargs)
data = cache.get(key)
if data:
return data
data = func(*args, **kwargs)
#in case a function is not False (implicit), so object couldn't be retrieved
cache_duration = duration
if not data:
cache_duration = 1
cache.set(key, data, cache_duration)
return data
return do_cache
def _backsearch(self):
"""
Inspect previous peaks from the last detected qrs peak (if any),
using a lower threshold
"""
if self.last_qrs_peak_num is not None:
for peak_num in range(self.last_qrs_peak_num + 1, self.peak_num + 1):
if self._is_qrs(peak_num=peak_num, backsearch=True):
self._update_qrs(peak_num=peak_num, backsearch=True)
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
def _expand(self, normalization, csphase, **kwargs):
"""Expand the grid into real spherical harmonics."""
if normalization.lower() == '4pi':
norm = 1
elif normalization.lower() == 'schmidt':
norm = 2
elif normalization.lower() == 'unnorm':
norm = 3
elif normalization.lower() == 'ortho':
norm = 4
else:
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
sampling=self.sampling,
**kwargs)
coeffs = SHCoeffs.from_array(cilm,
normalization=normalization.lower(),
csphase=csphase, copy=False)
return coeffs
def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
n=parent.newChild(None,self.name.upper(),None)
if self.type:
n.newTextChild(None,"TYPE",self.type)
n.newTextChild(None,"CRED",binascii.b2a_base64(self.cred))
return n
def buildSources(self, sourceTime=None):
"""
Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned.
"""
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
sources = {}
defaults = { 'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec, }
for item in self.re_sources:
values = {}
source = self.re_sources[item]
for key in defaults.keys():
if key in source:
values[key] = source[key]
else:
values[key] = defaults[key]
sources[item] = ( values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'], wd, yd, isdst )
return sources
def Sample(self, operation, description, data_size, compressed_data_size):
"""Takes a sample of data read or written for profiling.
Args:
operation (str): operation, either 'read' or 'write'.
description (str): description of the data read.
data_size (int): size of the data read in bytes.
compressed_data_size (int): size of the compressed data read in bytes.
"""
sample_time = time.time()
sample = '{0:f}\t{1:s}\t{2:s}\t{3:d}\t{4:d}\n'.format(
sample_time, operation, description, data_size, compressed_data_size)
self._WritesString(sample)
def lipd_to_df(metadata, csvs):
"""
Create an organized collection of data frames from LiPD data
:param dict metadata: LiPD data
:param dict csvs: Csv data
:return dict: One data frame per table, organized in a dictionary by name
"""
dfs = {}
logger_dataframes.info("enter lipd_to_df")
# Flatten the dictionary, but ignore the chron data items
dict_in_dotted = {}
logger_dataframes.info("enter dot_notation")
_dotnotation_for_nested_dictionary(metadata, '', dict_in_dotted)
dict_in_dotted = collections.OrderedDict(sorted(dict_in_dotted.items()))
# Create one data frame for metadata items
dfs["metadata"] = pd.DataFrame(list(dict_in_dotted.items()), columns=["Key", "Value"])
# Create data frames for paleo data and chron data items. This does not use LiPD data, it uses the csv data
dfs.update(_get_dfs(csvs))
return dfs
def get_settings(self, index):
"""Get settings for index.
:param index: index name
"""
settings = self.es.indices.get_settings(index=index)
return next(iter(settings.values()))
def get_recipe_env(self, arch, with_flags_in_cc=True):
""" Add libgeos headers to path """
env = super(ShapelyRecipe, self).get_recipe_env(arch, with_flags_in_cc)
libgeos_dir = Recipe.get_recipe('libgeos', self.ctx).get_build_dir(arch.arch)
env['CFLAGS'] += " -I{}/dist/include".format(libgeos_dir)
return env
def _escape_filterargs(self, filterargs):
"""
Escapes values in filterargs.
filterargs is a value suitable for Django's string formatting operator
(%), which means it's either a tuple or a dict. This return a new tuple
or dict with all values escaped for use in filter strings.
"""
if isinstance(filterargs, tuple):
filterargs = tuple(self.ldap.filter.escape_filter_chars(value)
for value in filterargs)
elif isinstance(filterargs, dict):
filterargs = dict((key, self.ldap.filter.escape_filter_chars(value))
for key, value in filterargs.items())
else:
raise TypeError("filterargs must be a tuple or dict.")
return filterargs
def compute_Pi_J(self, CDR3_seq, J_usage_mask):
"""Compute Pi_J.
This function returns the Pi array from the model factors of the J genomic
contributions, P(delJ|J). This corresponds to J(D)^{x_4}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutJ_genomic_CDR3_segs : list of strings
List of all the J genomic nucleotide sequences trimmed to begin at the
conserved 3' residue (F/W) and with the maximum number of palindromic
insertions appended.
self.PJdelJ_nt_pos_vec : list of ndarrays
For each J allele, format P(delJ|J) into the correct form for a Pi
array or J^{x_2}. This is only done for the first and last position
in each codon.
self.PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
For each J allele, and each 'amino acid', format P(delJ|J) for
positions in the middle of a codon into the correct form for a Pi
array or J^{x_2} given the 'amino acid'.
Returns
-------
Pi_J : ndarray
(4, 3L) array corresponding to J^{x_4}.
r_J_usage_mask: list
Reduced J_usage mask. J genes/alleles with no contribution (bad
alignment) are removed from the mask. This is done to speed up the
computation on the V side (which must be done conditioned on the J).
"""
#Note, the cutJ_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
#furthermore, the genomic sequence should be pruned to start at a conserved region on the J side
Pi_J = [] #Holds the aggregate weight for each nt possiblity and position
r_J_usage_mask = []
for j, J_in in enumerate(J_usage_mask):
try:
cutJ_gen_seg = self.cutJ_genomic_CDR3_segs[J_in]
except IndexError:
print('Check provided J usage mask. Contains indices out of allowed range.')
continue
current_alignment_length = self.max_nt_to_aa_alignment_right(CDR3_seq, cutJ_gen_seg)
#alignment_lengths += [current_alignment_length]
current_Pi_J = np.zeros((4, len(CDR3_seq)*3))
if current_alignment_length > 0:
#For first and last nt in a codon use PJdelJ_nt_pos_vec
current_Pi_J[:, -current_alignment_length:] = self.PJdelJ_nt_pos_vec[J_in][:, -current_alignment_length:]
for pos in range(-2, -current_alignment_length-1, -3): #for middle nt use PJdelJ_2nd_nt_pos_per_aa_vec
current_Pi_J[:, pos] = self.PJdelJ_2nd_nt_pos_per_aa_vec[J_in][CDR3_seq[pos/3]][:, pos]
if np.sum(current_Pi_J) > 0:
Pi_J.append(current_Pi_J)
r_J_usage_mask.append(J_in)
return Pi_J, r_J_usage_mask
def scale_and_shift(self, scale_pct, shift_pct, callback=True):
"""Stretch and/or shrink the color map via altering the shift map.
"""
maxlen = self.maxc + 1
self.sarr = np.arange(maxlen)
# limit shrinkage to 5% of original size
scale = max(scale_pct, 0.050)
self.scale_pct = scale
work = self._stretch(self.sarr, scale)
n = len(work)
if n < maxlen:
# pad on the lowest and highest values of the shift map
m = (maxlen - n) // 2 + 1
barr = np.array([0] * m)
tarr = np.array([self.maxc] * m)
work = np.concatenate([barr, work, tarr])
work = work[:maxlen]
# we are mimicking ds9's stretch and shift algorithm here.
# ds9 seems to cut the center out of the stretched array
# BEFORE shifting
n = len(work) // 2
halflen = maxlen // 2
work = work[n - halflen:n + halflen].astype(np.uint, copy=False)
assert len(work) == maxlen, \
RGBMapError("scaled shift map is != %d" % maxlen)
# shift map according to the shift_pct
work = self._shift(work, shift_pct)
assert len(work) == maxlen, \
RGBMapError("shifted shift map is != %d" % maxlen)
self.t_.set(shift_array=work)
def revokeSystemPermission(self, login, user, perm):
"""
Parameters:
- login
- user
- perm
"""
self.send_revokeSystemPermission(login, user, perm)
self.recv_revokeSystemPermission()
def build_acl_port(self, port, enabled=True):
"Build the acl for L4 Ports. "
if port is not None:
if ':' in port:
range = port.replace(':', ' ')
acl = "range %(range)s " % {'range': range}
else:
acl = "eq %(port)s " % {'port': port}
if not enabled:
acl += "inactive"
return acl
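# A quick behavioural sketch (illustrative only; the method never touches
# ``self``, so ``None`` stands in for the instance here):
print(build_acl_port(None, '80'))                  # 'eq 80 '
print(build_acl_port(None, '20:21'))               # 'range 20 21 '
print(build_acl_port(None, '443', enabled=False))  # 'eq 443 inactive'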
def get_copy(dict_, key, default=None):
"""
Looks for a key in a dictionary, if found returns
a deepcopied value, otherwise returns default value
"""
value = dict_.get(key, default)
if value:
return deepcopy(value)
return value
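# Usage sketch (illustrative data, not from the source): mutating the copied
# value leaves the original dictionary untouched.
settings = {'servers': ['a', 'b']}
servers = get_copy(settings, 'servers', default=[])
servers.append('c')
print(settings['servers'])  # ['a', 'b']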
async def kick_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None) -> base.Boolean:
"""
Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
until_date = prepare_arg(until_date)
payload = generate_payload(**locals())
result = await self.request(api.Methods.KICK_CHAT_MEMBER, payload)
return result
async def resolution(self):
"""Get the resolution voted on.
Returns
-------
awaitable of :class:`aionationstates.ResolutionAtVote`
The resolution voted for.
Raises
------
aionationstates.NotFound
If the resolution has since been passed or defeated.
"""
resolutions = await asyncio.gather(
aionationstates.ga.resolution_at_vote,
aionationstates.sc.resolution_at_vote,
)
for resolution in resolutions:
if (resolution is not None
and resolution.name == self.resolution_name):
return resolution
raise aionationstates.NotFound
def user_role_add(user_id=None, user=None, tenant_id=None,
tenant=None, role_id=None, role=None, profile=None,
project_id=None, project_name=None, **connection_args):
'''
Add role for user in tenant (keystone user-role-add)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_add \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b \
role_id=ce377245c4ec9b70e1c639c89e8cead4
salt '*' keystone.user_role_add user=admin tenant=admin role=admin
'''
kstone = auth(profile, **connection_args)
if project_id and not tenant_id:
tenant_id = project_id
elif project_name and not tenant:
tenant = project_name
if user:
user_id = user_get(name=user, profile=profile,
**connection_args)[user].get('id')
else:
user = next(six.iterkeys(user_get(user_id, profile=profile,
**connection_args)))['name']
if not user_id:
return {'Error': 'Unable to resolve user id'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant].get('id')
else:
tenant = next(six.iterkeys(tenant_get(tenant_id, profile=profile,
**connection_args)))['name']
if not tenant_id:
return {'Error': 'Unable to resolve tenant/project id'}
if role:
role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id']
else:
role = next(six.iterkeys(role_get(role_id, profile=profile,
**connection_args)))['name']
if not role_id:
return {'Error': 'Unable to resolve role id'}
if _OS_IDENTITY_API_VERSION > 2:
kstone.roles.grant(role_id, user=user_id, project=tenant_id)
else:
kstone.roles.add_user_role(user_id, role_id, tenant_id)
ret_msg = '"{0}" role added for user "{1}" for "{2}" tenant/project'
return ret_msg.format(role, user, tenant)
def load_image(name):
"""Load an image"""
image = pyglet.image.load(name).texture
verify_dimensions(image)
return image
def clean_value(self, value):
'''
Additional clean action to preprocess value before :meth:`to_python`
method.
Subclasses may define own clean_value method to allow additional clean
actions like html cleanup, etc.
'''
# We have to clean before checking min/max length. It's done in
# separate method to allow additional clean action in subclasses.
if self.nontext_replacement is not None:
value = replace_nontext(value, self.nontext_replacement)
if self.strip:
value = value.strip()
return value
def to_python(self, value):
'''
Coerce data from primitive form to native Python types.
Returns the default type (if exists)
'''
try:
if value is None and self._default is not None:
return self.default
self._check_required(value)
if not isinstance(value, self._python_type):
value = self._import(value)
return value
except ValueError as ex:
raise ValueError(ex, self._errors['to_python'])
def tofits(outfilename, pixelarray, hdr = None, verbose = True):
"""
Takes a 2D numpy array and write it into a FITS file.
If you specify a header (pyfits format, as returned by fromfits()) it will be used for the image.
You can give me boolean numpy arrays, I will convert them into 8 bit integers.
"""
pixelarrayshape = pixelarray.shape
if verbose :
print "FITS export shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1])
if pixelarray.dtype.name == "bool":
pixelarray = np.cast["uint8"](pixelarray)
if os.path.isfile(outfilename):
os.remove(outfilename)
if hdr is None: # then a minimal header will be created
hdu = pyfits.PrimaryHDU(pixelarray.transpose())
else: # this if else is probably not needed but anyway ...
hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)
hdu.writeto(outfilename)
if verbose :
print "Wrote %s" % outfilename
def from_warc(warc_record):
"""
Extracts relevant information from a WARC record. This function does not invoke scrapy but only uses the article
extractor.
:return:
"""
html = str(warc_record.raw_stream.read())
url = warc_record.rec_headers.get_header('WARC-Target-URI')
download_date = warc_record.rec_headers.get_header('WARC-Date')
article = NewsPlease.from_html(html, url=url, download_date=download_date)
return article
def OauthAuthorizeApplication(self, oauth_duration = 'hour'):
"""
Authorize an application using oauth. If this function returns True, the obtained oauth token can be retrieved using getResponse and will be in url-parameters format.
TODO: allow the option to ask the user himself for permission, instead of doing this automatically. Especially important for web applications.
@param oauth_duration (string) (optional) -'hour', 'day', 'week', 'year', 'forever'
@return (boolean) - Boolean indicating whether OauthAuthorizeApplication was successful
"""
if self.__session_id__ == '':
self.__error__ = "not logged in"
return False
# automatically get authorization for the application
parameters = {'oauth_token':self.__oauth_token__.key, 'tok_expir':self.__OauthGetTokExpir__(oauth_duration), 'action':'ALLOW', 'session_id':self.__session_id__}
if self.__SenseApiCall__('/oauth/provider_authorize', 'POST', parameters = parameters):
if self.__status__ == 302:
response = urlparse.parse_qs(urlparse.urlparse(self.__headers__['location'])[4])
verifier = response['oauth_verifier'][0]
self.__oauth_token__.set_verifier(verifier)
return True
else:
self.__setAuthenticationMethod__('session_id')
self.__error__ = "error authorizing application"
return False
else:
self.__setAuthenticationMethod__('session_id')
self.__error__ = "error authorizing application"
return False
def _set_where(self):
"""
Set the where clause for the relation query.
:return: self
:rtype: BelongsToMany
"""
foreign = self.get_foreign_key()
self._query.where(foreign, "=", self._parent.get_key())
return self
def __git_tag_push():
"""
Push all tags.
Returns True if the underlying git push command exits with status 0, otherwise False.
"""
command = ['git', 'push', 'origin', '--tags']
Shell.msg('Pushing tags...')
if APISettings.DEBUG:
Git.__debug(command, True)
if not call(command):
return True
return False
def download_file(self, filename):
"""Download a file from device to local filesystem"""
res = self.__exchange('send("{filename}")'.format(filename=filename))
if ('unexpected' in res) or ('stdin' in res):
log.error('Unexpected error downloading file: %s', res)
raise Exception('Unexpected error downloading file')
#tell device we are ready to receive
self.__write('C')
#we should get a NUL terminated filename to start with
sent_filename = self.__expect(NUL).strip()
log.info('receiving ' + sent_filename)
#ACK to start download
self.__write(ACK, True)
buf = ''
data = ''
chunk, buf = self.__read_chunk(buf)
#read chunks until we get an empty which is the end
while chunk != '':
self.__write(ACK, True)
data = data + chunk
chunk, buf = self.__read_chunk(buf)
return data
def hide_samples(portal):
"""Removes samples views from everywhere, related indexes, etc.
"""
logger.info("Removing Samples from navbar ...")
if "samples" in portal:
portal.manage_delObjects(["samples"])
def remove_samples_action(content_type):
type_info = content_type.getTypeInfo()
actions = map(lambda action: action.id, type_info._actions)
for index, action in enumerate(actions, start=0):
if action == 'samples':
type_info.deleteActions([index])
break
def remove_actions_from_sample(sample):
type_info = sample.getTypeInfo()
idxs = [index for index, value in enumerate(type_info._actions)]
type_info.deleteActions(idxs)
logger.info("Removing Samples action view from inside Clients ...")
for client in portal.clients.objectValues("Client"):
remove_samples_action(client)
logger.info("Removing Samples action view from inside Batches ...")
for batch in portal.batches.objectValues("Batch"):
remove_samples_action(batch)
logger.info("Removing actions from inside Samples ...")
for sample in api.search(dict(portal_type="Sample"), "bika_catalog"):
remove_actions_from_sample(api.get_object(sample))
commit_transaction(portal)
def get_joke():
"""Return a Ron Swanson quote.
Returns None if unable to retrieve a quote.
"""
page = requests.get("http://ron-swanson-quotes.herokuapp.com/v2/quotes")
if page.status_code == 200:
jokes = []
jokes = json.loads(page.content.decode(page.encoding))
return '"' + jokes[0] + '" - Ron Swanson'
return None
def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:
"""Creates a new record or updates the existing one
with the specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index.
Returns:
The primary key of the row that was created/updated.
"""
return self.get_queryset().upsert(conflict_target, fields, index_predicate)
def sparse_grid_from_unmasked_sparse_grid(unmasked_sparse_grid, sparse_to_unmasked_sparse):
"""Use the central arc-second coordinate of every unmasked pixelization grid's pixels and mapping between each
pixelization pixel and unmasked pixelization pixel to compute the central arc-second coordinate of every masked
pixelization grid pixel.
Parameters
-----------
unmasked_sparse_grid : ndarray
The (y,x) arc-second centre of every unmasked pixelization grid pixel.
sparse_to_unmasked_sparse : ndarray
The index mapping between every pixelization pixel and masked pixelization pixel.
"""
total_pix_pixels = sparse_to_unmasked_sparse.shape[0]
pix_grid = np.zeros((total_pix_pixels, 2))
for pixel_index in range(total_pix_pixels):
pix_grid[pixel_index, 0] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], 0]
pix_grid[pixel_index, 1] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], 1]
return pix_grid
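# Minimal numpy sketch (the arrays are illustrative, not from the source):
# pick the (y, x) centres of the masked pixelization pixels out of the
# unmasked grid via the index mapping.
import numpy as np

unmasked = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
mapping = np.array([0, 2])  # masked pixel i maps to unmasked pixel mapping[i]
print(sparse_grid_from_unmasked_sparse_grid(unmasked, mapping))
# [[0. 0.]
#  [2. 2.]]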
def _expand_error_codes(code_parts):
"""Return an expanded set of error codes to ignore."""
codes = set(ErrorRegistry.get_error_codes())
expanded_codes = set()
try:
for part in code_parts:
# Dealing with split-lined configurations; The part might begin
# with a whitespace due to the newline character.
part = part.strip()
if not part:
continue
codes_to_add = {code for code in codes
if code.startswith(part)}
if not codes_to_add:
log.warning(
'Error code passed is not a prefix of any '
'known errors: %s', part)
expanded_codes.update(codes_to_add)
except TypeError as e:
raise IllegalConfiguration(e)
return expanded_codes
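# Standalone illustration of the prefix-expansion idea with a made-up code
# set (the real implementation reads the registered error codes):
known_codes = {'D100', 'D101', 'D200'}
parts = ['D1', '  ']
expanded = {code for part in parts if part.strip()
            for code in known_codes if code.startswith(part.strip())}
print(sorted(expanded))  # ['D100', 'D101']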
def add_to_group(self, devices):
"""Add device(s) to the group."""
ids = {d.id for d in self.devices_in_group()}
ids.update(self._device_ids(devices))
self._set_group(ids)
def show_description(self):
""" Prints the formatted response for the matching return type """
def print_missing(c, v):
resp = self.responses[v["type"]]
name = "[%s] %s" % (resp.label, dr.get_name(c))
self.print_header(name, 3)
print(file=self.stream)
print('*Missing Dependencies*:', file=self.stream)
req_all, req_any = v.missing
if req_all:
print(file=self.stream)
print('* Requires:', file=self.stream)
for m in req_all:
print(' * {}'.format(dr.get_name(m)), file=self.stream)
if req_any:
print(file=self.stream)
for m in req_any:
print('* At Least One Of:', file=self.stream)
                    if isinstance(m, list):
for c in m:
print(' * {}'.format(dr.get_name(c)), file=self.stream)
else:
print(' * {}'.format(dr.get_name(m)), file=self.stream)
print(file=self.stream)
def printit(c, v):
print("```", file=self.stream)
print(render(c, v), file=self.stream)
print("```", file=self.stream)
print(file=self.stream)
for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
v = self.broker[c]
_type = v.get('type')
if _type in self.responses:
self.counts[_type] += 1
if _type:
if self.missing and _type == 'skip':
print_missing(c, v)
elif ((self.fail_only and _type == 'rule') or
(not self.fail_only and _type != 'skip')):
printit(c, v)
print(file=self.stream)
self.print_header("Rule Execution Summary", 2)
print("```", file=self.stream)
for c in self.counts:
print(' ' + self.responses[c].title + str(self.counts[c]), file=self.stream)
print("```", file=self.stream) | 0.002378 |
def load_datafile(name, search_path=('.',), codecs=get_codecs(), **kwargs):
    """
    Find a datafile on the search path and load it with the first matching codec.
    TODO: only the first match is loaded.
    kwargs:
        default = if passed, return this value on failure instead of raising
    """
mod = find_datafile(name, search_path, codecs)
if not mod:
if 'default' in kwargs:
return kwargs['default']
raise IOError("file %s not found in search path %s" %(name, str(search_path)))
(codec, datafile) = mod[0]
return codec().load(open(datafile)) | 0.005505 |
def user_sentiments_most_frequent(
self,
username = None,
single_most_frequent = True
):
"""
This function returns the most frequent calculated sentiments expressed
in tweets of a specified user. By default, the single most frequent
sentiment is returned. All sentiments with their corresponding
frequencies can be returned also.
"""
try:
sentiment_frequencies = collections.Counter(self.user_sentiments(
username = username
))
if single_most_frequent:
return sentiment_frequencies.most_common(1)[0][0]
else:
return dict(sentiment_frequencies)
        except Exception:
            log.error("error determining most frequent sentiments -- possibly no username specified")
return None | 0.011848 |
def parse_record(self, raw, indx=0):
"""Parse raw data (that is retrieved by "request") and return pandas.DataFrame.
Returns tuple (data, metadata)
data - pandas.DataFrame with retrieved data.
metadata - pandas.DataFrame with info about symbol, currency, frequency,
displayname and status of given request
"""
suffix = '' if indx == 0 else '_%i' % (indx + 1)
# Parsing status
status = self.status(raw)
# Testing if no errors
if status['StatusType'] != 'Connected':
if self.raise_on_error:
raise DatastreamException('%s (error %i): %s --> "%s"' %
(status['StatusType'], status['StatusCode'],
status['StatusMessage'], status['Request']))
else:
self._test_status_and_warn()
return pd.DataFrame(), {}
record = self.extract_data(raw)
get_field = lambda fldname: record[fldname + suffix]
try:
error = get_field('INSTERROR')
if self.raise_on_error:
raise DatastreamException('Error: %s --> "%s"' %
(error, status['Request']))
else:
self.last_status['StatusMessage'] = error
self.last_status['StatusType'] = 'INSTERROR'
self._test_status_and_warn()
metadata = {'Frequency': '', 'Currency': '', 'DisplayName': '',
'Symbol': '', 'Status': error}
except KeyError:
# Parsing metadata of the symbol
# NB! currency might be returned as symbol thus "unicode" should be used
metadata = {'Frequency': ustr(get_field('FREQUENCY')),
'Currency': ustr(get_field('CCY')),
'DisplayName': ustr(get_field('DISPNAME')),
'Symbol': ustr(get_field('SYMBOL')),
'Status': 'OK'}
# Fields with data
if suffix == '':
fields = [ustr(x) for x in record if '_' not in x]
else:
fields = [ustr(x) for x in record if suffix in x]
# Filter metadata
meta_fields = ['CCY', 'DISPNAME', 'FREQUENCY', 'SYMBOL', 'DATE', 'INSTERROR']
fields = [x.replace(suffix, '') for x in fields
if not any([y in x for y in meta_fields])]
if 'DATE' + suffix in record:
date = record['DATE' + suffix]
elif 'DATE' in record:
date = record['DATE']
else:
date = None
if len(fields) > 0 and date is not None:
# Check if we have a single value or a series
if isinstance(date, dt.datetime):
data = pd.DataFrame({x: [get_field(x)] for x in fields},
index=[date])
else:
data = pd.DataFrame({x: get_field(x)[0] for x in fields},
index=date[0])
else:
data = pd.DataFrame()
metadata = pd.DataFrame(metadata, index=[indx])
metadata = metadata[['Symbol', 'DisplayName', 'Currency', 'Frequency', 'Status']]
return data, metadata | 0.003011 |
def prettyPrintSequence(self, sequence, verbosity=1):
"""
Pretty print a sequence.
@param sequence (list) Sequence
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
text = ""
for i in xrange(len(sequence)):
pattern = sequence[i]
      if pattern is None:
text += "<reset>"
if i < len(sequence) - 1:
text += "\n"
else:
text += self.patternMachine.prettyPrintPattern(pattern,
verbosity=verbosity)
return text | 0.010345 |
def find_all(soup, name=None, attrs=None, recursive=True, text=None,
limit=None, **kwargs):
"""The `find` and `find_all` methods of `BeautifulSoup` don't handle the
`text` parameter combined with other parameters. This is necessary for
e.g. finding links containing a string or pattern. This method first
searches by text content, and then by the standard BeautifulSoup arguments.
"""
if text is None:
return soup.find_all(
name, attrs or {}, recursive, text, limit, **kwargs
)
if isinstance(text, string_types):
text = re.compile(re.escape(text), re.I)
tags = soup.find_all(
name, attrs or {}, recursive, **kwargs
)
rv = []
for tag in tags:
if match_text(text, tag):
rv.append(tag)
if limit is not None and len(rv) >= limit:
break
return rv | 0.002252 |
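A short usage sketch for the helper above, assuming `match_text` (not shown in the snippet) checks a tag's text content against the compiled pattern:
from bs4 import BeautifulSoup
html = '<a href="/dl">Download now</a><a href="/about">About</a>'
soup = BeautifulSoup(html, 'html.parser')
# Find <a> tags whose text contains "download", case-insensitively; the string
# is escaped and compiled into a regex before the tags are filtered.
links = find_all(soup, 'a', text='download')
# links -> [<a href="/dl">Download now</a>]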
def maybe_store_highlights(file_id, data, tfidf, kvlclient):
'''wrapper around :func:`create_highlights` that stores the response
payload in the `kvlayer` table called `highlights` as a stored
value if data['store'] is `False`. This allows error values as
well as successful responses from :func:`create_highlights` to
both get stored.
'''
payload = create_highlights(data, tfidf)
if data['store'] is True:
stored_payload = {}
stored_payload.update(payload)
stored_payload['state'] = STORED
payload_str = json.dumps(stored_payload)
kvlclient.put('highlights', (file_id, payload_str))
return payload | 0.001479 |
def load_config(filename):
'''
    Read the contents of a JSON config file; returns None if the file cannot be read.
'''
try:
with open(filename, 'r') as config_file:
return json.loads(config_file.read())
except IOError:
pass | 0.004608 |
def _get_worker_id(self, conn):
"""Get the worker ID, using a preestablished connection."""
if self._worker_id is None:
self._worker_id = conn.incr(self._key_worker())
return self._worker_id | 0.00885 |
def export_agg_risk_csv(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
path = '%s.%s' % (sanitize(ekey[0]), ekey[1])
fname = dstore.export_path(path)
writer.save(dstore['agg_risk'].value, fname)
return [fname] | 0.002762 |
def column_summary_data(self):
'''Returns a dictionary of column name -> value, for cluster-level results'''
assembled_summary = self._to_cluster_summary_assembled()
pct_id, read_depth = self._pc_id_and_read_depth_of_longest()
columns = {
            'assembled': assembled_summary,
'match': self._has_match(assembled_summary),
'ref_seq': self.ref_name,
'pct_id': str(pct_id),
'ctg_cov': str(read_depth),
'known_var': self._to_cluster_summary_has_known_nonsynonymous(assembled_summary),
'novel_var': self._to_cluster_summary_has_novel_nonsynonymous(assembled_summary)
}
return columns | 0.006897 |
def ignore_exception(exception_class):
"""A decorator that ignores `exception_class` exceptions"""
def _decorator(func):
def newfunc(*args, **kwds):
try:
return func(*args, **kwds)
except exception_class:
pass
return newfunc
return _decorator | 0.003067 |
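A small self-contained usage example for the decorator above:
@ignore_exception(ZeroDivisionError)
def safe_div(a, b):
    return a / b
print(safe_div(6, 3))  # 2.0
print(safe_div(6, 0))  # None -- the ZeroDivisionError is swallowed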
def create_filehandlers(self, filenames, fh_kwargs=None):
"""Organize the filenames into file types and create file handlers."""
filenames = list(OrderedDict.fromkeys(filenames))
logger.debug("Assigning to %s: %s", self.info['name'], filenames)
self.info.setdefault('filenames', []).extend(filenames)
filename_set = set(filenames)
created_fhs = {}
# load files that we know about by creating the file handlers
for filetype, filetype_info in self.sorted_filetype_items():
filehandlers = self.new_filehandlers_for_filetype(filetype_info,
filename_set,
fh_kwargs=fh_kwargs)
filename_set -= set([fhd.filename for fhd in filehandlers])
if filehandlers:
created_fhs[filetype] = filehandlers
self.file_handlers[filetype] = sorted(
self.file_handlers.get(filetype, []) + filehandlers,
key=lambda fhd: (fhd.start_time, fhd.filename))
# update existing dataset IDs with information from the file handler
self.update_ds_ids_from_file_handlers()
# load any additional dataset IDs determined dynamically from the file
self.add_ds_ids_from_files()
return created_fhs | 0.002155 |
def update(self):
"""Calulate the auxilary term.
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> n(3)
>>> v(0., 1e5, 1e6)
>>> q(_1=[0., 1., 2.], _7=[0., 2., 5.])
>>> maxdt('12h')
>>> derived.seconds.update()
>>> derived.nmbsubsteps.update()
>>> derived.vq.update()
>>> derived.vq
vq(toy_1_1_0_0_0=[0.0, 243200.0, 2086400.0],
toy_7_1_0_0_0=[0.0, 286400.0, 2216000.0])
"""
con = self.subpars.pars.control
der = self.subpars
for (toy, qs) in con.q:
setattr(self, str(toy), 2.*con.v+der.seconds/der.nmbsubsteps*qs)
self.refresh() | 0.002717 |
def project_activity(index, start, end):
"""Compute the metrics for the project activity section of the enriched
git index.
Returns a dictionary containing a "metric" key. This key contains the
metrics for this section.
:param index: index object
:param start: start date to get the data from
    :param end: end date to get the data up to
:return: dictionary with the value of the metrics
"""
results = {
"metrics": [Commits(index, start, end),
Authors(index, start, end)]
}
return results | 0.00177 |
def AddColumn(self, column, default="", col_index=-1):
"""Appends a new column to the table.
Args:
column: A string, name of the column to add.
default: Default value for entries. Defaults to ''.
col_index: Integer index for where to insert new column.
Raises:
TableError: Column name already exists.
"""
if column in self.table:
raise TableError("Column %r already in table." % column)
if col_index == -1:
self._table[0][column] = column
for i in range(1, len(self._table)):
self._table[i][column] = default
else:
self._table[0].Insert(column, column, col_index)
for i in range(1, len(self._table)):
self._table[i].Insert(column, default, col_index) | 0.00246 |
def _fetch_xml(self, url):
"""Fetch a url and parse the document's XML."""
with contextlib.closing(urlopen(url)) as f:
return xml.etree.ElementTree.parse(f).getroot() | 0.010309 |
def predict_percentile(self, X, ancillary_X=None, p=0.5):
"""
Returns the median lifetimes for the individuals, by default. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
ancillary_X: numpy array or DataFrame, optional
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float, optional (default=0.5)
the percentile, must be between 0 and 1.
Returns
-------
percentiles: DataFrame
See Also
--------
predict_median
"""
lambda_, rho_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X)
return pd.DataFrame(lambda_ * np.power(-np.log(p), 1 / rho_), index=_get_index(X)) | 0.003971 |
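A standalone check of the formula used above, assuming the Weibull parameterization S(t) = exp(-(t/lambda)^rho) implied by it (toy parameter values, not from a fitted model):
import numpy as np
lambda_, rho_ = 10.0, 1.5   # toy scale/shape parameters
p = 0.5                     # the median
t_p = lambda_ * np.power(-np.log(p), 1.0 / rho_)
# The survival function evaluated at the returned time equals p:
assert np.isclose(np.exp(-(t_p / lambda_) ** rho_), p)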
def _send_size(self):
" Report terminal size to server. "
rows, cols = _get_size(sys.stdout.fileno())
self._send_packet({
'cmd': 'size',
'data': [rows, cols]
}) | 0.009259 |
def _make_repr(class_name, *args, **kwargs):
"""
Generate a repr string.
Positional arguments should be the positional arguments used to
construct the class. Keyword arguments should consist of tuples of
the attribute value and default. If the value is the default, then
it won't be rendered in the output.
Here's an example::
def __repr__(self):
return make_repr('MyClass', 'foo', name=(self.name, None))
    The output of this would be something like ``MyClass('foo',
name='Will')``.
"""
arguments = [repr(arg) for arg in args]
arguments.extend(
"{}={!r}".format(name, value)
for name, (value, default) in sorted(kwargs.items())
if value != default
)
return "{}({})".format(class_name, ", ".join(arguments)) | 0.001235 |
def get_index_labels(self, targets):
"""Get the labels(known target/not) mapped to indices.
:param targets: List of known targets
:return: Dictionary of index-label mappings
"""
target_ind = self.graph.vs.select(name_in=targets).indices
rest_ind = self.graph.vs.select(name_notin=targets).indices
label_mappings = {i: 1 for i in target_ind}
label_mappings.update({i: 0 for i in rest_ind})
return label_mappings | 0.004141 |
def agent_url(self):
"""
This method returns the agent's url.
:return:
"""
try:
if self._data_from_search:
agent = self._data_from_search.find('ul', {'class': 'links'})
links = agent.find_all('a')
return links[1]['href']
else:
return self._ad_page_content.find('a', {'id': 'smi-link-branded'})['href']
except Exception as e:
if self._debug:
logging.error(
"Error getting agent_url. Error message: " + e.args[0])
return | 0.004886 |
def transformer_image_decoder(targets,
encoder_output,
ed_attention_bias,
hparams,
name=None):
"""Transformer image decoder over targets with local attention.
Args:
targets: Tensor of shape [batch, ...], and whose size is batch * height *
width * hparams.num_channels * hparams.hidden_size.
encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
ed_attention_bias: Tensor which broadcasts with shape [batch,
hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
hparams: HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, height, width * hparams.num_channels,
hparams.hidden_size].
"""
with tf.variable_scope(name, default_name="transformer_dec"):
batch_size = common_layers.shape_list(targets)[0]
targets = tf.reshape(targets, [batch_size,
hparams.img_len,
hparams.img_len,
hparams.num_channels * hparams.hidden_size])
decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
decoder_output = cia.transformer_decoder_layers(
decoder_input,
encoder_output,
hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams,
attention_type=hparams.dec_attention_type,
encoder_decoder_attention_bias=ed_attention_bias,
name="decoder")
decoder_output = tf.reshape(decoder_output,
[batch_size,
hparams.img_len,
hparams.img_len * hparams.num_channels,
hparams.hidden_size])
return decoder_output | 0.001632 |
def post_state(self, name, state):
"""Asynchronously try to update the state for a service.
If the update fails, nothing is reported because we don't wait for a
response from the server. This function will return immmediately and
not block.
Args:
name (string): The name of the service
state (int): The new state of the service
"""
self.post_command(OPERATIONS.CMD_UPDATE_STATE,
{'name': name, 'new_status': state}) | 0.00381 |
def create_database(self, name, owner=None):
"""
Create a new MapD database
Parameters
----------
name : string
Database name
"""
statement = ddl.CreateDatabase(name, owner=owner)
self._execute(statement) | 0.007168 |
def check_exists(path, type='file'):
""" Check if a file or a folder exists """
if type == 'file':
if not os.path.isfile(path):
raise RuntimeError('The file `%s` does not exist.' % path)
else:
if not os.path.isdir(path):
raise RuntimeError('The folder `%s` does not exist.' % path)
return True | 0.002849 |
def reduce(self):
"""Reduce to a canonical form."""
support = frozenset(range(1, self.nvars+1))
new_clauses = set()
for clause in self.clauses:
vs = list(support - {abs(uniqid) for uniqid in clause})
if vs:
for num in range(1 << len(vs)):
new_part = {v if bit_on(num, i) else ~v
for i, v in enumerate(vs)}
new_clauses.add(clause | new_part)
else:
new_clauses.add(clause)
return self.__class__(self.nvars, new_clauses) | 0.00335 |
def valueFromString(self, value, context=None):
"""
        Converts the given string text to a value that matches this column's type.
        :param value   | <str>
               context | <variant>
"""
if value in ('today', 'now'):
return datetime.datetime.utcnow()
elif dateutil_parser:
return dateutil_parser.parse(value)
else:
time_struct = time.strptime(value, self.defaultFormat())
return datetime.datetime(time_struct.tm_year,
                                     time_struct.tm_mon,
                                     time_struct.tm_mday,
                                     time_struct.tm_hour,
                                     time_struct.tm_min,
time_struct.tm_sec) | 0.00237 |
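A quick standalone check of the struct_time attribute names used above (tm_mon, tm_mday and tm_min are the correct field names):
import datetime
import time
time_struct = time.strptime('2021-03-04 05:06:07', '%Y-%m-%d %H:%M:%S')
value = datetime.datetime(time_struct.tm_year, time_struct.tm_mon,
                          time_struct.tm_mday, time_struct.tm_hour,
                          time_struct.tm_min, time_struct.tm_sec)
assert value == datetime.datetime(2021, 3, 4, 5, 6, 7)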
def translate_input(translator,
skip_translate=None,
ignore_collisions=False,
validate_ip_addrs=True,
**kwargs):
'''
Translate CLI/SLS input into the format the API expects. The ``translator``
argument must be a module containing translation functions, within
salt.utils.docker.translate. A ``skip_translate`` kwarg can be passed to
control which arguments are translated. It can be either a comma-separated
list or an iterable containing strings (e.g. a list or tuple), and members
of that tuple will have their translation skipped. Optionally,
skip_translate can be set to True to skip *all* translation.
'''
kwargs = copy.deepcopy(salt.utils.args.clean_kwargs(**kwargs))
invalid = {}
collisions = []
if skip_translate is True:
# Skip all translation
return kwargs
else:
if not skip_translate:
skip_translate = ()
else:
try:
skip_translate = _split(skip_translate)
except AttributeError:
pass
if not hasattr(skip_translate, '__iter__'):
log.error('skip_translate is not an iterable, ignoring')
skip_translate = ()
try:
# Using list(kwargs) here because if there are any invalid arguments we
# will be popping them from the kwargs.
for key in list(kwargs):
real_key = translator.ALIASES.get(key, key)
if real_key in skip_translate:
continue
# ipam_pools is designed to be passed as a list of actual
# dictionaries, but if each of the dictionaries passed has a single
# element, it will be incorrectly repacked.
if key != 'ipam_pools' and salt.utils.data.is_dictlist(kwargs[key]):
kwargs[key] = salt.utils.data.repack_dictlist(kwargs[key])
try:
kwargs[key] = getattr(translator, real_key)(
kwargs[key],
validate_ip_addrs=validate_ip_addrs,
skip_translate=skip_translate)
except AttributeError:
log.debug('No translation function for argument \'%s\'', key)
continue
except SaltInvocationError as exc:
kwargs.pop(key)
invalid[key] = exc.strerror
try:
translator._merge_keys(kwargs)
except AttributeError:
pass
# Convert CLI versions of commands to their docker-py counterparts
for key in translator.ALIASES:
if key in kwargs:
new_key = translator.ALIASES[key]
value = kwargs.pop(key)
if new_key in kwargs:
collisions.append(new_key)
else:
kwargs[new_key] = value
try:
translator._post_processing(kwargs, skip_translate, invalid)
except AttributeError:
pass
except Exception as exc:
error_message = exc.__str__()
log.error(
'Error translating input: \'%s\'', error_message, exc_info=True)
else:
error_message = None
error_data = {}
if error_message is not None:
error_data['error_message'] = error_message
if invalid:
error_data['invalid'] = invalid
if collisions and not ignore_collisions:
for item in collisions:
error_data.setdefault('collisions', []).append(
'\'{0}\' is an alias for \'{1}\', they cannot both be used'
.format(translator.ALIASES_REVMAP[item], item)
)
if error_data:
raise CommandExecutionError(
'Failed to translate input', info=error_data)
return kwargs | 0.000519 |
def _connect(self, host, port, proc, timeout_seconds):
"""Connect to the websocket, retrying as needed. Returns the socket."""
if ":" in host and not host.startswith("["): # Support ipv6 addresses.
host = "[%s]" % host
url = "ws://%s:%s/sc2api" % (host, port)
was_running = False
for i in range(timeout_seconds):
is_running = proc and proc.running
was_running = was_running or is_running
if (i >= timeout_seconds // 4 or was_running) and not is_running:
logging.warning(
"SC2 isn't running, so bailing early on the websocket connection.")
break
logging.info("Connecting to: %s, attempt: %s, running: %s", url, i,
is_running)
try:
return websocket.create_connection(url, timeout=timeout_seconds)
except socket.error:
pass # SC2 hasn't started listening yet.
except websocket.WebSocketBadStatusException as err:
if err.status_code == 404:
pass # SC2 is listening, but hasn't set up the /sc2api endpoint yet.
else:
raise
time.sleep(1)
raise ConnectError("Failed to connect to the SC2 websocket. Is it up?") | 0.010135 |
def _merge_dicts(dics, container=dict):
"""
    :param dics: [<dict-like objects that must not share keys with each other>]
:param container: callble to make a container object
:return: <container> object
>>> _merge_dicts(({}, ))
{}
>>> _merge_dicts(({'a': 1}, ))
{'a': 1}
>>> sorted(kv for kv in _merge_dicts(({'a': 1}, {'b': 2})).items())
[('a', 1), ('b', 2)]
"""
dic_itr = anyconfig.compat.from_iterable(d.items() for d in dics)
return container(anyconfig.compat.OrderedDict(dic_itr)) | 0.001887 |
def to_camel_case(text):
"""Convert to camel case.
:param str text:
:rtype: str
:return:
"""
split = text.split('_')
return split[0] + "".join(x.title() for x in split[1:]) | 0.004975 |
def get_ratefactor(self, base, code):
"""
        Return the Decimal currency exchange rate factor of 'code' compared to 1 'base' unit, or raise RuntimeError
Yahoo currently uses USD as base currency, but here we detect it with get_baserate
"""
raise RuntimeError("%s Deprecated: API withdrawn in February 2018" % self.name)
try:
rate = self.get_rate(code)
except RuntimeError:
# fallback
return self.get_singlerate(base, code)
self.check_ratebase(rate)
ratefactor = Decimal(rate['price'])
if base == self.base:
return ratefactor
else:
return self.ratechangebase(ratefactor, self.base, base) | 0.006878 |
def enqueue(self, priority: int, item: TItem) -> bool:
"""Adds an entry to the priority queue.
If drop_duplicate_entries is set and there is already a (priority, item)
entry in the queue, then the enqueue is ignored. Check the return value
to determine if an enqueue was kept or dropped.
Args:
priority: The priority of the item. Lower priorities dequeue before
higher priorities.
item: The item associated with the given priority.
Returns:
True if the item was enqueued. False if drop_duplicate_entries is
set and the item is already in the queue.
"""
if self._drop_set is not None:
if (priority, item) in self._drop_set:
return False
self._drop_set.add((priority, item))
# First enqueue initializes self._offset.
if not self._buckets:
self._buckets.append([item])
self._offset = priority
self._len = 1
return True
# Where is the bucket this item is supposed to go into?
i = priority - self._offset
# Extend bucket list backwards if needed.
if i < 0:
self._buckets[:0] = [[] for _ in range(-i)]
self._offset = priority
i = 0
# Extend bucket list forwards if needed.
while i >= len(self._buckets):
self._buckets.append([])
# Finish by adding item to the intended bucket's list.
self._buckets[i].append(item)
self._len += 1
return True | 0.001873 |
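A hedged usage sketch for the bucketed queue above; the class name and constructor argument are assumptions, since the enclosing class is not shown in the snippet:
# Sketch only: assumes the method belongs to a queue class, here called
# BucketPriorityQueue, built with duplicate dropping enabled.
q = BucketPriorityQueue(drop_duplicate_entries=True)
q.enqueue(5, 'low')     # -> True, first enqueue sets the offset to 5
q.enqueue(2, 'urgent')  # -> True, buckets are extended backwards to priority 2
q.enqueue(5, 'low')     # -> False, duplicate (priority, item) pair is dropped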
def write(_filename, _long, enter=True):
"""Write the call info to file"""
def method(*arg, **kw): # pylint: disable=W0613
"""Reference to the advice in order to facilitate argument support."""
def get_short(_fname):
"""Get basename of the file. If file is __init__.py, get its directory too"""
dir_path, short_fname = os.path.split(_fname)
short_fname = short_fname.replace(".py", "")
if short_fname == "__init__":
short_fname = "%s.%s" % (os.path.basename(dir_path), short_fname)
return short_fname
def get_long(_fname):
"""Get full reference to the file"""
try:
return re.findall(r'(ansible.*)\.py', _fname)[-1].replace(os.sep, ".")
except IndexError:
# If ansible is extending some library, ansible won't be present in the path.
return get_short(_fname)
meth_code = arg[1].im_func.func_code
fname, lineno, _name = meth_code.co_filename, meth_code.co_firstlineno, meth_code.co_name
marker = ENTER_MARKER
if not _long:
_fname, _rjust = get_short(fname), RJUST_SMALL
else:
_fname, _rjust = get_long(fname), RJUST_LONG
if not enter:
try:
meth_line_count = len(inspect.getsourcelines(meth_code)[0])
lineno += meth_line_count - 1
except Exception: # pylint: disable=W0703
# TODO: Find other way to get ending line number for the method
# Line number same as start of method.
pass
marker = EXIT_MARKER
with open(_filename, "a") as fptr:
call_info = "%s: %s:%s %s%s\n" % (
_fname.rjust(_rjust), # filename
str(lineno).rjust(4), # line number
(" %s" % DEPTH_MARKER) * COUNT, # Depth
marker, # Method enter, exit marker
_name # Method name
)
fptr.write(call_info)
return method | 0.00278 |
def count_words(pattern):
"""
Count the number of words in a pattern as well as the total length of those words
:param pattern: The pattern to parse
:type pattern: str
:return: The word count first, then the total length of all words
:rtype : tuple of (int, int)
"""
word_pattern = re.compile(r'(\b(?<![\(\)\[\]\|])\w\w*\b(?![\(\)\[\]\|]))', re.IGNORECASE)
words = word_pattern.findall(pattern)
word_count = len(words)
word_len = sum(len(word) for word in words)
return word_count, word_len | 0.00678 |
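A quick self-contained check of the helper above (plain words of two or more characters, not adjacent to (), [] or |, are counted):
assert count_words('hello world beta') == (3, 14)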
def build_api_struct(self):
"""
        Calls the clean method of the class and returns the info in a structure
        that the Atlas API accepts.
"""
self.clean()
r = {
"type": self._type,
"requested": self._requested,
"value": self._value
}
if self._tags:
r["tags"] = self._tags
return r | 0.005051 |
def status(name='all'):
'''
    Use drbdadm to show the status of the DRBD devices
    (available in drbd 9 and later).
Support multiple nodes, multiple volumes.
:type name: str
:param name:
Resource name.
:return: drbd status of resource.
:rtype: list(dict(res))
CLI Example:
.. code-block:: bash
salt '*' drbd.status
salt '*' drbd.status name=<resource name>
'''
# Initialize for multiple times test cases
global ret
global resource
ret = []
resource = {}
cmd = ['drbdadm', 'status']
cmd.append(name)
#One possible output: (number of resource/node/vol are flexible)
#resource role:Secondary
# volume:0 disk:Inconsistent
# volume:1 disk:Inconsistent
# drbd-node1 role:Primary
# volume:0 replication:SyncTarget peer-disk:UpToDate done:10.17
# volume:1 replication:SyncTarget peer-disk:UpToDate done:74.08
# drbd-node2 role:Secondary
# volume:0 peer-disk:Inconsistent resync-suspended:peer
# volume:1 peer-disk:Inconsistent resync-suspended:peer
for line in __salt__['cmd.run'](cmd).splitlines():
_line_parser(line)
if resource:
ret.append(resource)
return ret | 0.002423 |