text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def create_session(self, **params):
"""
Create the session
date format: YYYY-MM-DDThh:mm
location: ISO code
"""
required_keys = ('market', 'currency', 'locale', 'pickupplace',
'dropoffplace', 'pickupdatetime', 'dropoffdatetime',
'driverage')
service_url = "{url}/{params_path}".format(
url=self.PRICING_SESSION_URL,
params_path=self._construct_params(params, required_keys)
)
poll_path = self.make_request(service_url,
headers=self._session_headers(),
callback=lambda resp: resp.headers[
'location'],
userip=params['userip'])
return "{url}{path}".format(url=self.API_HOST, path=poll_path) | 0.002217 |
def to_html(graph: BELGraph, chart: Optional[str] = None) -> str:
"""Render the graph as an HTML string.
Common usage may involve writing to a file like:
>>> from pybel.examples import sialic_acid_graph
>>> with open('ideogram_output.html', 'w') as file:
... print(to_html(sialic_acid_graph), file=file)
"""
with open(os.path.join(HERE, 'index.html'), 'rt') as f:
html_template = Template(f.read())
return html_template.render(**_get_context(graph, chart=chart)) | 0.001965 |
def get_visible_elements(self, locator, params=None, timeout=None):
"""
Get elements both present AND visible in the DOM.
    If timeout is 0 (zero), return a list of WebElement instances or None; else wait and retry for the timeout and raise
    TimeoutException should no elements be found.
:param locator: locator tuple
:param params: (optional) locator params
:param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: list of WebElement instances
"""
return self.get_present_elements(locator, params, timeout, True) | 0.006568 |
def _longest_common_subsequence(x, y):
"""
Return the longest common subsequence between two sequences.
Parameters
----------
x, y : sequence
Returns
-------
sequence
Longest common subsequence of x and y.
Examples
--------
>>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
['G', 'T', 'A', 'B']
>>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
... ["GA", "X", "T", "X", "A", "Y", "B"])
['GA', 'T', 'A', 'B']
"""
m = len(x)
n = len(y)
# L[i, j] will contain the length of the longest common subsequence of
# x[0..i - 1] and y[0..j - 1].
L = _np.zeros((m + 1, n + 1), dtype=int)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
continue
elif x[i - 1] == y[j - 1]:
L[i, j] = L[i - 1, j - 1] + 1
else:
L[i, j] = max(L[i - 1, j], L[i, j - 1])
ret = []
i, j = m, n
while i > 0 and j > 0:
# If current character in x and y are same, then current character is
# part of the longest common subsequence.
if x[i - 1] == y[j - 1]:
ret.append(x[i - 1])
i, j = i - 1, j - 1
# If not same, then find the larger of two and go in the direction of
# larger value.
elif L[i - 1, j] > L[i, j - 1]:
i -= 1
else:
j -= 1
return ret[::-1] | 0.000668 |
def _check_for_answers(self, pk):
"""
Callback called for every packet received to check if we are
waiting for an answer on this port. If so, then cancel the retry
timer.
"""
longest_match = ()
if len(self._answer_patterns) > 0:
data = (pk.header,) + tuple(pk.data)
for p in list(self._answer_patterns.keys()):
logger.debug('Looking for pattern match on %s vs %s', p, data)
if len(p) <= len(data):
if p == data[0:len(p)]:
match = data[0:len(p)]
if len(match) >= len(longest_match):
logger.debug('Found new longest match %s', match)
longest_match = match
if len(longest_match) > 0:
self._answer_patterns[longest_match].cancel()
del self._answer_patterns[longest_match] | 0.002139 |
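The lookup above is a longest-prefix match over the registered answer patterns. A minimal standalone sketch of the same idea, with hypothetical tuples standing in for a packet's (header, *data) and for the registered patterns:
data = (5, 1, 2, 3)                                    # hypothetical (header, *data) of a received packet
patterns = {(5,): 'a', (5, 1): 'b', (5, 1, 9): 'c'}    # hypothetical registered answer patterns
longest_match = ()
for p in patterns:
    # a pattern matches when it is a prefix of the packet data; keep the longest such prefix
    if len(p) <= len(data) and p == data[0:len(p)] and len(p) >= len(longest_match):
        longest_match = p
print(longest_match)  # (5, 1)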
def _store(self, lines, buffer=None, store='source'):
"""Store one or more lines of input.
If input lines are not newline-terminated, a newline is automatically
appended."""
if buffer is None:
buffer = self._buffer
if lines.endswith('\n'):
buffer.append(lines)
else:
buffer.append(lines+'\n')
setattr(self, store, self._set_source(buffer)) | 0.008811 |
def GetNumberOfRows(self, table_name):
"""Retrieves the number of rows in the table.
Args:
table_name (str): name of the table.
Returns:
int: number of rows.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
"""
if not self._connection:
raise IOError('Not opened.')
self._cursor.execute(self._NUMBER_OF_ROWS_QUERY.format(table_name))
row = self._cursor.fetchone()
if not row:
raise IOError(
'Unable to retrieve number of rows of table: {0:s}'.format(
table_name))
number_of_rows = row[0]
if isinstance(number_of_rows, py2to3.STRING_TYPES):
try:
number_of_rows = int(number_of_rows, 10)
except ValueError as exception:
raise IOError((
'Unable to determine number of rows of table: {0:s} '
'with error: {1!s}').format(table_name, exception))
return number_of_rows | 0.005035 |
def env(key, default):
"""
Helper to try to get a setting from the environment, or pyconfig, or
finally use a provided default.
"""
value = os.environ.get(key, None)
if value is not None:
log.info(' %s = %r', key.lower().replace('_', '.'), value)
return value
key = key.lower().replace('_', '.')
value = get(key)
if value is not None:
return value
return default | 0.002315 |
def get_task(client, task_id):
''' Gets task information for the given ID '''
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint)
return response.json() | 0.004348 |
def Gamma(theta: vertex_constructor_param_types, k: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of theta and k to matching shaped gamma.
    :param theta: the theta (scale) of the Gamma with either the same shape as specified for this vertex or a scalar
    :param k: the k (shape) of the Gamma with either the same shape as specified for this vertex or a scalar
"""
return Double(context.jvm_view().GammaVertex, label, cast_to_double_vertex(theta), cast_to_double_vertex(k)) | 0.016423 |
def complex_exists(self, complex: str) -> bool:
"""
Shortcut to check if complex exists in our database.
"""
try:
self.check_complex(complex)
except exceptions.RumetrComplexNotFound:
return False
return True | 0.007143 |
def sub(self):
'''
    :returns:
        Substituted Template with the fields set beforehand.
        If no fields were set up beforehand, :func:`raw` is used.
'''
if self.__fields:
return _Template(self.raw).substitute(self.__fields)
return self.raw | 0.00565 |
def reverse_id(self):
"""Generate the id of reverse_variable from the reaction's id."""
return '_'.join((self.id, 'reverse',
hashlib.md5(
self.id.encode('utf-8')).hexdigest()[0:5])) | 0.007968 |
def set_itunes_explicit(self):
"""Parses explicit from itunes tags and sets value"""
try:
self.itunes_explicit = self.soup.find('itunes:explicit').string
self.itunes_explicit = self.itunes_explicit.lower()
except AttributeError:
self.itunes_explicit = None | 0.006329 |
def follower_num(self):
"""获取追随者数量,就是关注此人的人数.
:return: 追随者数量
:rtype: int
"""
if self.url is None:
return 0
else:
number = int(self.soup.find(
'div', class_='zm-profile-side-following zg-clear').find_all(
'a')[1].strong.text)
return number | 0.005634 |
def ingest(self, co, classname=None, code_objects={}, show_asm=None):
"""
Pick out tokens from an uncompyle6 code object, and transform them,
returning a list of uncompyle6 Token's.
The transformations are made to assist the deparsing grammar.
    Specifically:
- various types of LOAD_CONST's are categorized in terms of what they load
- COME_FROM instructions are added to assist parsing control structures
- MAKE_FUNCTION and FUNCTION_CALLS append the number of positional arguments
- some EXTENDED_ARGS instructions are removed
Also, when we encounter certain tokens, we add them to a set which will cause custom
grammar rules. Specifically, variable arg tokens like MAKE_FUNCTION or BUILD_LIST
cause specific rules for the specific number of arguments they take.
"""
if not show_asm:
show_asm = self.show_asm
bytecode = self.build_instructions(co)
# show_asm = 'both'
if show_asm in ('both', 'before'):
for instr in bytecode.get_instructions(co):
print(instr.disassemble())
# list of tokens/instructions
tokens = []
# "customize" is in the process of going away here
customize = {}
if self.is_pypy:
customize['PyPy'] = 0
# Scan for assertions. Later we will
# turn 'LOAD_GLOBAL' to 'LOAD_ASSERT'.
# 'LOAD_ASSERT' is used in assert statements.
self.load_asserts = set()
n = len(self.insts)
for i, inst in enumerate(self.insts):
# We need to detect the difference between:
# raise AssertionError
# and
# assert ...
# If we have a JUMP_FORWARD after the
# RAISE_VARARGS then we have a "raise" statement
# else we have an "assert" statement.
if self.version == 3.0:
                # There is an implied JUMP_IF_TRUE that we are not testing for (yet?) here
assert_can_follow = inst.opname == 'POP_TOP' and i+1 < n
else:
assert_can_follow = inst.opname == 'POP_JUMP_IF_TRUE' and i+1 < n
if assert_can_follow:
next_inst = self.insts[i+1]
if (next_inst.opname == 'LOAD_GLOBAL' and
next_inst.argval == 'AssertionError'):
if (i + 2 < n and self.insts[i+2].opname.startswith('RAISE_VARARGS')):
self.load_asserts.add(next_inst.offset)
pass
pass
# Get jump targets
# Format: {target offset: [jump offsets]}
jump_targets = self.find_jump_targets(show_asm)
# print("XXX2", jump_targets)
last_op_was_break = False
for i, inst in enumerate(self.insts):
argval = inst.argval
op = inst.opcode
if inst.opname == 'EXTENDED_ARG':
# FIXME: The EXTENDED_ARG is used to signal annotation
# parameters
if (i+1 < n and
self.insts[i+1].opcode != self.opc.MAKE_FUNCTION):
continue
if inst.offset in jump_targets:
jump_idx = 0
# We want to process COME_FROMs to the same offset to be in *descending*
# offset order so we have the larger range or biggest instruction interval
# last. (I think they are sorted in increasing order, but for safety
# we sort them). That way, specific COME_FROM tags will match up
# properly. For example, a "loop" with an "if" nested in it should have the
# "loop" tag last so the grammar rule matches that properly.
for jump_offset in sorted(jump_targets[inst.offset], reverse=True):
come_from_name = 'COME_FROM'
opname = self.opname_for_offset(jump_offset)
if opname == 'EXTENDED_ARG':
j = xdis.next_offset(op, self.opc, jump_offset)
opname = self.opname_for_offset(j)
if opname.startswith('SETUP_'):
come_from_type = opname[len('SETUP_'):]
come_from_name = 'COME_FROM_%s' % come_from_type
pass
elif inst.offset in self.except_targets:
come_from_name = 'COME_FROM_EXCEPT_CLAUSE'
tokens.append(Token(come_from_name,
jump_offset, repr(jump_offset),
offset='%s_%s' % (inst.offset, jump_idx),
has_arg = True, opc=self.opc))
jump_idx += 1
pass
pass
elif inst.offset in self.else_start:
end_offset = self.else_start[inst.offset]
tokens.append(Token('ELSE',
None, repr(end_offset),
offset='%s' % (inst.offset),
has_arg = True, opc=self.opc))
pass
pattr = inst.argrepr
opname = inst.opname
if op in self.opc.CONST_OPS:
const = argval
if iscode(const):
if const.co_name == '<lambda>':
assert opname == 'LOAD_CONST'
opname = 'LOAD_LAMBDA'
elif const.co_name == '<genexpr>':
opname = 'LOAD_GENEXPR'
elif const.co_name == '<dictcomp>':
opname = 'LOAD_DICTCOMP'
elif const.co_name == '<setcomp>':
opname = 'LOAD_SETCOMP'
elif const.co_name == '<listcomp>':
opname = 'LOAD_LISTCOMP'
# verify() uses 'pattr' for comparison, since 'attr'
# now holds Code(const) and thus can not be used
# for comparison (todo: think about changing this)
# pattr = 'code_object @ 0x%x %s->%s' %\
# (id(const), const.co_filename, const.co_name)
pattr = '<code_object ' + const.co_name + '>'
else:
if isinstance(inst.arg, int) and inst.arg < len(co.co_consts):
argval, _ = _get_const_info(inst.arg, co.co_consts)
# Why don't we use _ above for "pattr" rather than "const"?
                    # This *is* a little hokey, but we have to coordinate with
# other parts like n_LOAD_CONST in pysource.py for example.
pattr = const
pass
elif opname in ('MAKE_FUNCTION', 'MAKE_CLOSURE'):
if self.version >= 3.6:
# 3.6+ doesn't have MAKE_CLOSURE, so opname == 'MAKE_FUNCTION'
flags = argval
opname = 'MAKE_FUNCTION_%d' % (flags)
attr = []
for flag in self.MAKE_FUNCTION_FLAGS:
bit = flags & 1
attr.append(bit)
flags >>= 1
attr = attr[:4] # remove last value: attr[5] == False
else:
pos_args, name_pair_args, annotate_args = parse_fn_counts(inst.argval)
pattr = ("%d positional, %d keyword pair, %d annotated" %
(pos_args, name_pair_args, annotate_args))
if name_pair_args > 0:
opname = '%s_N%d' % (opname, name_pair_args)
pass
if annotate_args > 0:
opname = '%s_A_%d' % (opname, annotate_args)
pass
opname = '%s_%d' % (opname, pos_args)
attr = (pos_args, name_pair_args, annotate_args)
tokens.append(
Token(
opname = opname,
attr = attr,
pattr = pattr,
offset = inst.offset,
linestart = inst.starts_line,
op = op,
has_arg = inst.has_arg,
opc = self.opc
)
)
continue
elif op in self.varargs_ops:
pos_args = argval
if self.is_pypy and not pos_args and opname == 'BUILD_MAP':
opname = 'BUILD_MAP_n'
else:
opname = '%s_%d' % (opname, pos_args)
elif self.is_pypy and opname == 'JUMP_IF_NOT_DEBUG':
# The value in the dict is in special cases in semantic actions, such
# as JUMP_IF_NOT_DEBUG. The value is not used in these cases, so we put
# in arbitrary value 0.
customize[opname] = 0
elif opname == 'UNPACK_EX':
# FIXME: try with scanner and parser by
# changing argval
before_args = argval & 0xFF
after_args = (argval >> 8) & 0xff
pattr = "%d before vararg, %d after" % (before_args, after_args)
argval = (before_args, after_args)
opname = '%s_%d+%d' % (opname, before_args, after_args)
elif op == self.opc.JUMP_ABSOLUTE:
# Further classify JUMP_ABSOLUTE into backward jumps
# which are used in loops, and "CONTINUE" jumps which
# may appear in a "continue" statement. The loop-type
# and continue-type jumps will help us classify loop
# boundaries The continue-type jumps help us get
# "continue" statements with would otherwise be turned
# into a "pass" statement because JUMPs are sometimes
# ignored in rules as just boundary overhead. In
# comprehensions we might sometimes classify JUMP_BACK
# as CONTINUE, but that's okay since we add a grammar
# rule for that.
pattr = argval
target = self.get_target(inst.offset)
if target <= inst.offset:
next_opname = self.insts[i+1].opname
                # 'Continue's include jumps to loops that are not at
                # the end of a block which follows with POP_BLOCK and COME_FROM_LOOP.
# If the JUMP_ABSOLUTE is to a FOR_ITER and it is followed by another JUMP_FORWARD
# then we'll take it as a "continue".
is_continue = (self.insts[self.offset2inst_index[target]]
.opname == 'FOR_ITER'
and self.insts[i+1].opname == 'JUMP_FORWARD')
if (is_continue or
(inst.offset in self.stmts and (inst.starts_line and
next_opname not in self.not_continue_follow))):
opname = 'CONTINUE'
else:
opname = 'JUMP_BACK'
# FIXME: this is a hack to catch stuff like:
# if x: continue
# the "continue" is not on a new line.
# There are other situations where we don't catch
# CONTINUE as well.
if tokens[-1].kind == 'JUMP_BACK' and tokens[-1].attr <= argval:
if tokens[-2].kind == 'BREAK_LOOP':
del tokens[-1]
else:
# intern is used because we are changing the *previous* token
tokens[-1].kind = intern('CONTINUE')
if last_op_was_break and opname == 'CONTINUE':
last_op_was_break = False
continue
# FIXME: go over for Python 3.6+. This is sometimes wrong
elif op == self.opc.RETURN_VALUE:
if inst.offset in self.return_end_ifs:
opname = 'RETURN_END_IF'
elif inst.offset in self.load_asserts:
opname = 'LOAD_ASSERT'
last_op_was_break = opname == 'BREAK_LOOP'
tokens.append(
Token(
opname = opname,
attr = argval,
pattr = pattr,
offset = inst.offset,
linestart = inst.starts_line,
op = op,
has_arg = inst.has_arg,
opc = self.opc
)
)
pass
if show_asm in ('both', 'after'):
for t in tokens:
print(t.format(line_prefix='L.'))
print()
return tokens, customize | 0.005529 |
def after_connect(self):
"""Execute after connect."""
# TODO: check if this works.
show_users = self.device.send("show users", timeout=120)
result = re.search(pattern_manager.pattern(self.platform, 'connected_locally'), show_users)
if result:
self.log('Locally connected to Calvados. Exiting.')
self.device.send('exit')
return True
return False | 0.007009 |
def kill_dashboard(self, check_alive=True):
"""Kill the dashboard.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_DASHBOARD, check_alive=check_alive) | 0.006472 |
def types(self):
"""
Tuple containing types transformed by this transformer.
"""
out = []
if self._transform_bytes:
out.append(bytes)
if self._transform_str:
out.append(str)
return tuple(out) | 0.00738 |
def satisfies(guard):
"""Returns the current token if it satisfies the guard function provided.
Fails otherwise.
    This is a generalisation of one_of.
"""
i = peek()
if (i is EndOfFile) or (not guard(i)):
fail(["<satisfies predicate " + _fun_to_str(guard) + ">"])
next()
return i | 0.006135 |
def sql(self, stmt, parameters=None, bulk_parameters=None):
"""
Execute SQL stmt against the crate server.
"""
if stmt is None:
return None
data = _create_sql_payload(stmt, parameters, bulk_parameters)
logger.debug(
'Sending request to %s with payload: %s', self.path, data)
content = self._json_request('POST', self.path, data=data)
logger.debug("JSON response for stmt(%s): %s", stmt, content)
return content | 0.003937 |
def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
"""
A service that leverages twitter lists for on-demand annotation of popular users.
TODO: Do this.
"""
####################################################################################################################
# Log into my application
####################################################################################################################
twitter = login(twitter_app_key, twitter_app_secret)
twitter_lists_list = twitter.get_list_memberships(user_id=user_twitter_id, count=1000)
for twitter_list in twitter_lists_list:
print(twitter_list)
return twitter_lists_list | 0.006887 |
def form_uri(item_content, item_class):
"""Form the URI for a music service element.
:param item_content: The content dict of the item
:type item_content: dict
:param item_class: The class of the item
:type item_class: Sub-class of
:py:class:`soco.data_structures.MusicServiceItem`
"""
extension = None
if 'mime_type' in item_content:
extension = MIME_TYPE_TO_EXTENSION[item_content['mime_type']]
out = URIS.get(item_class)
if out:
out = out.format(extension=extension, **item_content)
return out | 0.003221 |
def qax(mt, x, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return ax(mtj, x, m) | 0.006579 |
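The adjusted rate used above follows the usual geometric-growth substitution j = (i - q) / (1 + q), i.e. the interest rate net of the growth rate q. A quick numeric check with assumed values i = 0.04 and q = 0.02:
i, q = 0.04, 0.02          # assumed interest rate and geometric growth rate
j = (i - q) / (1 + q)      # rate at which the increasing annuity reduces to a level one
print(round(j, 6))         # 0.019608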
def prepare(query, params):
"""
For every match of the form ":param_name", call marshal
on kwargs['param_name'] and replace that section of the query
with the result
"""
def repl(match):
name = match.group(1)[1:]
if name in params:
return marshal(params[name])
return ":%s" % name
new, count = re.subn(_param_re, repl, query)
if len(params) > count:
raise cql.ProgrammingError("More keywords were provided "
"than parameters")
return new | 0.001805 |
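A self-contained sketch of the ":name" substitution idea used in prepare(); the _param_re pattern and marshal() below are simplified stand-ins for the module-level helpers that are not shown in the snippet:
import re

_param_re = re.compile(r':\w+')          # simplified stand-in for the real pattern

def marshal(value):
    # naive quoting, for illustration only
    return "'%s'" % value if isinstance(value, str) else str(value)

def prepare_demo(query, params):
    def repl(match):
        name = match.group(0)[1:]
        return marshal(params[name]) if name in params else match.group(0)
    new, count = re.subn(_param_re, repl, query)
    return new

print(prepare_demo('SELECT * FROM users WHERE name = :name', {'name': 'ada'}))
# SELECT * FROM users WHERE name = 'ada'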
def encrypt(self, key=None, iv="", cek="", **kwargs):
"""
Produces a JWE as defined in RFC7516 using an Elliptic curve key
        :param key: *Not used*, only there to present the same API as
JWE_RSA and JWE_SYM
:param iv: Initialization vector
:param cek: Content master key
:param kwargs: Extra keyword arguments
:return: An encrypted JWT
"""
_msg = as_bytes(self.msg)
_args = self._dict
try:
_args["kid"] = kwargs["kid"]
except KeyError:
pass
if 'params' in kwargs:
if 'apu' in kwargs['params']:
_args['apu'] = kwargs['params']['apu']
if 'apv' in kwargs['params']:
_args['apv'] = kwargs['params']['apv']
if 'epk' in kwargs['params']:
_args['epk'] = kwargs['params']['epk']
jwe = JWEnc(**_args)
ctxt, tag, cek = super(JWE_EC, self).enc_setup(
self["enc"], _msg, auth_data=jwe.b64_encode_header(), key=cek,
iv=iv)
if 'encrypted_key' in kwargs:
return jwe.pack(parts=[kwargs['encrypted_key'], iv, ctxt, tag])
return jwe.pack(parts=[iv, ctxt, tag]) | 0.001618 |
def getFingerprintsForExpressions(self, body, sparsity=1.0):
"""Bulk resolution of expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Fingerprint
Raises:
CorticalioException: if the request was not successful
"""
return self._expressions.resolveBulkExpression(self._retina, body, sparsity) | 0.009398 |
def get_primary_key(self, table):
"""Retrieve the column which is the primary key for a table."""
for column in self.get_schema(table):
if len(column) > 3 and 'pri' in column[3].lower():
return column[0] | 0.008097 |
def initialize_acceptance_criteria(**kwargs):
'''
initialize acceptance criteria with NULL values for thellier_gui and demag_gui
    acceptance criteria format is dictionaries:
acceptance_criteria={}
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=
acceptance_criteria[crit]['criterion_name']=
acceptance_criteria[crit]['value']=
acceptance_criteria[crit]['threshold_type']
acceptance_criteria[crit]['decimal_points']
'category':
'DE-SPEC','DE-SAMP'..etc
'criterion_name':
MagIC name
'value':
a number (for 'regular criteria')
a string (for 'flag')
            1 for True (if criterion is boolean)
            0 for False (if criterion is boolean)
-999 means N/A
'threshold_type':
        'low' for low threshold value
        'high' for high threshold value
        [flag1.flag2]: for flags
        'bool' for boolean flags (can be 'g','b' or True/False or 1/0)
'decimal_points':
number of decimal points in rounding
(this is used in displaying criteria in the dialog box)
        -999 means Exponent with 3 decimal points for floats and string for string
'''
acceptance_criteria = {}
# --------------------------------
# 'DE-SPEC'
# --------------------------------
# low cutoff value
category = 'DE-SPEC'
for crit in ['specimen_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
category = 'DE-SPEC'
for crit in ['specimen_mad', 'specimen_dang', 'specimen_alpha95']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = 1
# flag
for crit in ['specimen_direction_type']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
if crit == 'specimen_direction_type':
acceptance_criteria[crit]['threshold_type'] = ['l', 'p']
if crit == 'specimen_polarity':
acceptance_criteria[crit]['threshold_type'] = [
'n', 'r', 't', 'e', 'i']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'DE-SAMP'
# --------------------------------
# low cutoff value
category = 'DE-SAMP'
for crit in ['sample_n', 'sample_n_lines', 'sample_n_planes']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
category = 'DE-SAMP'
for crit in ['sample_r', 'sample_alpha95', 'sample_sigma', 'sample_k', 'sample_tilt_correction']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['sample_tilt_correction']:
acceptance_criteria[crit]['decimal_points'] = 0
elif crit in ['sample_alpha95']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# flag
for crit in ['sample_direction_type', 'sample_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
if crit == 'sample_direction_type':
acceptance_criteria[crit]['threshold_type'] = ['l', 'p']
if crit == 'sample_polarity':
acceptance_criteria[crit]['threshold_type'] = [
'n', 'r', 't', 'e', 'i']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'DE-SITE'
# --------------------------------
# low cutoff value
category = 'DE-SITE'
for crit in ['site_n', 'site_n_lines', 'site_n_planes']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['site_k', 'site_r', 'site_alpha95', 'site_sigma', 'site_tilt_correction']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['site_tilt_correction']:
acceptance_criteria[crit]['decimal_points'] = 0
else:
acceptance_criteria[crit]['decimal_points'] = 1
# flag
for crit in ['site_direction_type', 'site_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
if crit == 'site_direction_type':
acceptance_criteria[crit]['threshold_type'] = ['l', 'p']
if crit == 'site_polarity':
acceptance_criteria[crit]['threshold_type'] = [
'n', 'r', 't', 'e', 'i']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'DE-STUDY'
# --------------------------------
category = 'DE-STUDY'
# low cutoff value
for crit in ['average_k', 'average_n', 'average_nn', 'average_nnn', 'average_r']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
if crit in ['average_n', 'average_nn', 'average_nnn']:
acceptance_criteria[crit]['decimal_points'] = 0
elif crit in ['average_alpha95']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# high cutoff value
for crit in ['average_alpha95', 'average_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['average_alpha95']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-SPEC' (a long list from SPD.v.1.0)
# --------------------------------
category = 'IE-SPEC'
# low cutoff value
for crit in ['specimen_int_n', 'specimen_f', 'specimen_fvds', 'specimen_frac', 'specimen_q', 'specimen_w', 'specimen_r_sq', 'specimen_int_ptrm_n',
'specimen_int_ptrm_tail_n', 'specimen_ac_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
if crit in ['specimen_int_n', 'specimen_int_ptrm_n', 'specimen_int_ptrm_tail_n', 'specimen_ac_n']:
acceptance_criteria[crit]['decimal_points'] = 0
elif crit in ['specimen_f', 'specimen_fvds', 'specimen_frac', 'specimen_q']:
acceptance_criteria[crit]['decimal_points'] = 2
else:
acceptance_criteria[crit]['decimal_points'] = -999
# high cutoff value
for crit in ['specimen_b_sigma', 'specimen_b_beta', 'specimen_g', 'specimen_gmax', 'specimen_k', 'specimen_k_sse', 'specimen_k_prime', 'specimen_k_prime_sse',
'specimen_coeff_det_sq', 'specimen_z', 'specimen_z_md', 'specimen_int_mad', 'specimen_int_mad_anc', 'specimen_int_alpha', 'specimen_alpha', 'specimen_alpha_prime',
'specimen_theta', 'specimen_int_dang', 'specimen_int_crm', 'specimen_ptrm', 'specimen_dck', 'specimen_drat', 'specimen_maxdev', 'specimen_cdrat',
'specimen_drats', 'specimen_mdrat', 'specimen_mdev', 'specimen_dpal', 'specimen_tail_drat', 'specimen_dtr', 'specimen_md', 'specimen_dt', 'specimen_dac', 'specimen_gamma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['specimen_int_mad', 'specimen_int_mad_anc', 'specimen_int_dang', 'specimen_drat', 'specimen_cdrat', 'specimen_drats', 'specimen_tail_drat', 'specimen_dtr', 'specimen_md', 'specimen_dac', 'specimen_gamma']:
acceptance_criteria[crit]['decimal_points'] = 1
elif crit in ['specimen_gmax']:
acceptance_criteria[crit]['decimal_points'] = 2
elif crit in ['specimen_b_sigma', 'specimen_b_beta', 'specimen_g', 'specimen_k', 'specimen_k_prime']:
acceptance_criteria[crit]['decimal_points'] = 3
else:
acceptance_criteria[crit]['decimal_points'] = -999
# flags
for crit in ['specimen_scat']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = 'bool'
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-SAMP'
# --------------------------------
category = 'IE-SAMP'
# low cutoff value
for crit in ['sample_int_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_sigma', 'sample_int_sigma_perc']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['sample_int_rel_sigma_perc', 'sample_int_sigma_perc']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-SITE'
# --------------------------------
category = 'IE-SITE'
# low cutoff value
for crit in ['site_int_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_sigma', 'site_int_sigma_perc']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['site_int_rel_sigma_perc', 'site_int_sigma_perc']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-STUDY'
# --------------------------------
category = 'IE-STUDY'
# low cutoff value
for crit in ['average_int_n', 'average_int_n', 'average_int_nn', 'average_int_nnn', ]:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['average_int_rel_sigma', 'average_int_rel_sigma_perc', 'average_int_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['average_int_rel_sigma_perc']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'NPOLE'
# --------------------------------
category = 'NPOLE'
# flags
for crit in ['site_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = ['n', 'r']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
    # 'RPOLE'
# --------------------------------
category = 'RPOLE'
# flags
for crit in ['site_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = ['n', 'r']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VADM'
# --------------------------------
category = 'VADM'
# low cutoff value
for crit in ['vadm_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
if crit in ['vadm_n']:
acceptance_criteria[crit]['decimal_points'] = 0
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VADM'
# --------------------------------
category = 'VADM'
# low cutoff value
for crit in ['vadm_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['vadm_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
        acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
    # 'VDM'
# --------------------------------
category = 'VDM'
# low cutoff value
for crit in ['vdm_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['vdm_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
        acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VGP'
# --------------------------------
    category = 'VGP'
# low cutoff value
for crit in ['vgp_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['vgp_alpha95', 'vgp_dm', 'vgp_dp', 'vgp_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
        acceptance_criteria[crit]['threshold_type'] = "high"
        if crit in ['vgp_alpha95', 'vgp_dm', 'vgp_dp']:
            acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'AGE'
# --------------------------------
category = 'AGE'
# low cutoff value
for crit in ['average_age_min']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = -999
# high cutoff value
for crit in ['average_age_max', 'average_age_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = -999
# flags
for crit in ['average_age_unit']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = [
'Ga', 'Ka', 'Ma', 'Years AD (+/-)', 'Years BP', 'Years Cal AD (+/-)', 'Years Cal BP']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'ANI'
# --------------------------------
category = 'ANI'
# high cutoff value
    for crit in ['anisotropy_alt', 'sample_aniso_mean', 'site_aniso_mean']: # value is in percent
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = 3
# flags
for crit in ['specimen_aniso_ftest_flag']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = 'bool'
acceptance_criteria[crit]['decimal_points'] = -999
return(acceptance_criteria) | 0.001014 |
def add_query(self, sql, auto_begin=True, bindings=None,
abridge_sql_log=False):
"""Add a query to the current transaction. A thin wrapper around
ConnectionManager.add_query.
:param str sql: The SQL query to add
:param bool auto_begin: If set and there is no transaction in progress,
begin a new one.
    :param Optional[List[object]] bindings: An optional list of bindings for the
query.
:param bool abridge_sql_log: If set, limit the raw sql logged to 512
characters
"""
return self.connections.add_query(sql, auto_begin, bindings,
abridge_sql_log) | 0.004298 |
def _read_dict(self, data_dict, layer=None, source=None):
"""Load a dictionary into the ConfigTree. If the dict contains nested dicts
then the values will be added recursively. See module docstring for example code.
Parameters
----------
data_dict : dict
source data
layer : str
layer to load data into. If none is supplied the outermost one is used
source : str
Source to attribute the values to
"""
for k, v in data_dict.items():
self._set_with_metadata(k, v, layer, source) | 0.008375 |
def check_bucket_exists(self, bucket: str) -> bool:
"""
Checks if bucket with specified name exists.
:param bucket: the bucket to be checked.
:return: true if specified bucket exists.
"""
bucket_obj = self.gcp_client.bucket(bucket) # type: Bucket
return bucket_obj.exists() | 0.006061 |
def phonetic(s, method, concat=True, encoding='utf-8', decode_error='strict'):
"""Convert names or strings into phonetic codes.
The implemented algorithms are `soundex
<https://en.wikipedia.org/wiki/Soundex>`_, `nysiis
<https://en.wikipedia.org/wiki/New_York_State_Identification_and_
Intelligence_System>`_, `metaphone
<https://en.wikipedia.org/wiki/Metaphone>`_ or `match_rating
<https://en.wikipedia.org/wiki/Match_rating_approach>`_.
Parameters
----------
s : pandas.Series
A pandas.Series with string values (often names) to encode.
method: str
The algorithm that is used to phonetically encode the values.
The possible options are "soundex", "nysiis", "metaphone" or
"match_rating".
concat: bool, optional
Remove whitespace before phonetic encoding.
encoding: str, optional
If bytes are given, this encoding is used to decode. Default
is 'utf-8'.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte Series is given that
contains characters not of the given `encoding`. By default,
it is 'strict', meaning that a UnicodeDecodeError will be
raised. Other values are 'ignore' and 'replace'.
Returns
-------
pandas.Series
A Series with phonetic encoded values.
"""
# encoding
if sys.version_info[0] == 2:
s = s.apply(
lambda x: x.decode(encoding, decode_error)
if type(x) == bytes else x)
if concat:
s = s.str.replace(r"[\-\_\s]", "")
for alg in _phonetic_algorithms:
if method in alg['argument_names']:
phonetic_callback = alg['callback']
break
else:
raise ValueError("The algorithm '{}' is not known.".format(method))
return s.str.upper().apply(
lambda x: phonetic_callback(x) if pandas.notnull(x) else np.nan
) | 0.000513 |
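For a sense of what the encoders return, similar-sounding names map to the same code. A tiny illustration using the soundex function from the jellyfish package, which is an assumption here (the snippet's phonetic callbacks are defined elsewhere in the module):
import jellyfish  # assumed backend for the phonetic callbacks

print(jellyfish.soundex('John'), jellyfish.soundex('Jon'))  # J500 J500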
def get_id_fields(self):
"""
Called to return a list of fields consisting of, at minimum,
the PK field name. The output of this method is used to
construct a Prefetch object with a .only() queryset
when this field is not being sideloaded but we need to
return a list of IDs.
"""
model = self.get_model()
out = [model._meta.pk.name] # get PK field name
# If this is being called, it means it
# is a many-relation to its parent.
# Django wants the FK to the parent,
# but since accurately inferring the FK
# pointing back to the parent is less than trivial,
# we will just pull all ID fields.
# TODO: We also might need to return all non-nullable fields,
# or else it is possible Django will issue another request.
for field in model._meta.fields:
if isinstance(field, models.ForeignKey):
out.append(field.name + '_id')
return out | 0.001969 |
def _checkForOrphanedModels (self):
"""If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval:
"""
self.logger.debug("Checking for orphaned models older than %s" % \
(self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
self._modelOrphanIntervalSecs)
if orphanedModelId is None:
return
self.logger.info("Removing orphaned model: %d" % (orphanedModelId))
# Change the model hash and params hash as stored in the models table so
# that we can insert a new model with the same paramsHash
for attempt in range(100):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
attempt)).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
except:
success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
# Mark this model as complete, with reason "orphaned"
self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
completionMsg="Orphaned")
# Update our results DB immediately, rather than wait for the worker
      # to inform us. This ensures that the getParticleInfos() calls we make
# below don't include this particle. Setting the metricResult to None
# sets it to worst case
self._resultsDB.update(modelID=orphanedModelId,
modelParams=None,
modelParamsHash=paramsHash,
metricResult=None,
completed = True,
completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
matured = True,
numRecords = 0) | 0.013603 |
def register_laser_hooks(self, hook_type: str, hook: Callable):
"""registers the hook with this Laser VM"""
if hook_type == "add_world_state":
self._add_world_state_hooks.append(hook)
elif hook_type == "execute_state":
self._execute_state_hooks.append(hook)
elif hook_type == "start_sym_exec":
self._start_sym_exec_hooks.append(hook)
elif hook_type == "stop_sym_exec":
self._stop_sym_exec_hooks.append(hook)
elif hook_type == "start_sym_trans":
self._start_sym_trans_hooks.append(hook)
elif hook_type == "stop_sym_trans":
self._stop_sym_trans_hooks.append(hook)
else:
            raise ValueError(
                "Invalid hook type %s. Must be one of {add_world_state, "
                "execute_state, start_sym_exec, stop_sym_exec, "
                "start_sym_trans, stop_sym_trans}" % hook_type
            )
def donotify(nick, rest):
"notify <nick> <message>"
opts = rest.split(' ')
to = opts[0]
Notify.store.notify(nick, to, ' '.join(opts[1:]))
return "Will do!" | 0.0375 |
def write_debug_log(self, file_path):
""" Write the debug log to a file """
with open(file_path, "wb+") as fh:
fh.write(system.get_system_info().encode('utf-8'))
# writing to debug stream
self._debug_stream.seek(0)
fh.write(self._debug_stream.read().encode('utf-8'))
fh.write("The following errors occured:\n".encode('utf-8'))
for error in self._errors:
fh.write((error + "\n").encode('utf-8'))
for k, v in self._error_dict.items():
if len(v) > 0:
fh.write(("Error(s) in %s with formula %s:\n" % k).encode('utf-8'))
for error in v:
fh.write((error + "\n").encode('utf-8')) | 0.003906 |
def upload_token(
self,
bucket,
key=None,
expires=3600,
policy=None,
strict_policy=True):
"""生成上传凭证
Args:
bucket: 上传的空间名
key: 上传的文件名,默认为空
expires: 上传凭证的过期时间,默认为3600s
policy: 上传策略,默认为空
Returns:
上传凭证
"""
if bucket is None or bucket == '':
raise ValueError('invalid bucket name')
scope = bucket
if key is not None:
scope = '{0}:{1}'.format(bucket, key)
args = dict(
scope=scope,
deadline=int(time.time()) + expires,
)
if policy is not None:
self.__copy_policy(policy, args, strict_policy)
return self.__upload_token(args) | 0.002469 |
def _set_next_hop_mpls(self, v, load=False):
"""
Setter method for next_hop_mpls, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/next_hop_mpls (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop_mpls is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop_mpls() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=next_hop_mpls.next_hop_mpls, is_container='container', presence=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Changes for IPoMPLS route download, pkt path.'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hop_mpls must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=next_hop_mpls.next_hop_mpls, is_container='container', presence=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Changes for IPoMPLS route download, pkt path.'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__next_hop_mpls = t
if hasattr(self, '_set'):
self._set() | 0.005701 |
def get_wigner_seitz_cell(self) -> List[List[np.ndarray]]:
"""
Returns the Wigner-Seitz cell for the given lattice.
Returns:
A list of list of coordinates.
Each element in the list is a "facet" of the boundary of the
Wigner Seitz cell. For instance, a list of four coordinates will
represent a square facet.
"""
vec1 = self._matrix[0]
vec2 = self._matrix[1]
vec3 = self._matrix[2]
list_k_points = []
for i, j, k in itertools.product([-1, 0, 1], [-1, 0, 1], [-1, 0, 1]):
list_k_points.append(i * vec1 + j * vec2 + k * vec3)
from scipy.spatial import Voronoi
tess = Voronoi(list_k_points)
to_return = []
for r in tess.ridge_dict:
if r[0] == 13 or r[1] == 13:
to_return.append([tess.vertices[i] for i in tess.ridge_dict[r]])
return to_return | 0.003185 |
def write(self, valuedict):
"""Returns the lines that this template line should add to the input file."""
if self.identifier in valuedict:
value = valuedict[self.identifier]
elif self.default is not None:
value = self.default
elif self.fromtag is not None and self.fromtag in valuedict:
if self.operator == "count":
value = len(valuedict[self.fromtag])
else:
msg.err("referenced 'from' attribute/operator {} not in xml dictionary.".format(self.fromtag))
exit(1)
else:
msg.err("a required line {} had no value or default specified.".format(self.identifier))
exit(1)
#Before we generate the result, validate the choices if they exist
if len(self.choices) > 0:
for single in value:
if str(single) not in self.choices:
msg.warn("failed choices validation for {} in {} (line {})".format(
single, self.choices, self.identifier))
result = []
#Get the string representation of the value
if isinstance(value, list):
sval = " ".join([ str(val) for val in value])
else:
sval = str(value)
if self.comment != "" and (self.nvalues < 0 or self.nvalues > 5):
#We will put the comments on a separate line from the actual values.
result.append(self.comment)
result.append(sval)
else:
result.append("{} {}".format(sval, self.comment))
return result | 0.007922 |
def chi_square_distance(point1, point2):
"""!
@brief Calculate Chi square distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\frac{\left ( a_{i} - b_{i} \right )^{2}}{\left | a_{i} \right | + \left | b_{i} \right |};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (float) Chi square distance between two objects.
"""
distance = 0.0
for i in range(len(point1)):
divider = abs(point1[i]) + abs(point2[i])
if divider == 0.0:
continue
distance += ((point1[i] - point2[i]) ** 2.0) / divider
return distance | 0.008708 |
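A quick worked check of the formula above, for illustration: with a = (1, 2, 3) and b = (2, 2, 1) the per-component terms are 1/3, 0 and 4/4, so the distance is about 1.333:
print(chi_square_distance([1, 2, 3], [2, 2, 1]))  # 1.3333... = 1/3 + 0 + 1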
def read_detail(self, request):
"""
Implements the Read Detail (read an object)
maps to GET /api/objects/:id/ in rest semantics
:param request: rip.Request
:return: rip.Response
"""
pipeline = crud_pipeline_factory.read_detail_pipeline(
configuration=self.configuration)
return pipeline(request=request) | 0.005263 |
def collect_results(rule, max_results=500, result_stream_args=None):
"""
Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args)
"""
if result_stream_args is None:
logger.error("This function requires a configuration dict for the "
"inner ResultStream object.")
raise KeyError
rs = ResultStream(rule_payload=rule,
max_results=max_results,
**result_stream_args)
return list(rs.stream()) | 0.000789 |
def _construct_permission(self, function, source_arn=None, source_account=None, suffix="", event_source_token=None):
"""Constructs the Lambda Permission resource allowing the source service to invoke the function this event
source triggers.
:returns: the permission resource
:rtype: model.lambda_.LambdaPermission
"""
lambda_permission = LambdaPermission(self.logical_id + 'Permission' + suffix,
attributes=function.get_passthrough_resource_attributes())
try:
# Name will not be available for Alias resources
function_name_or_arn = function.get_runtime_attr("name")
except NotImplementedError:
function_name_or_arn = function.get_runtime_attr("arn")
lambda_permission.Action = 'lambda:invokeFunction'
lambda_permission.FunctionName = function_name_or_arn
lambda_permission.Principal = self.principal
lambda_permission.SourceArn = source_arn
lambda_permission.SourceAccount = source_account
lambda_permission.EventSourceToken = event_source_token
return lambda_permission | 0.005106 |
def assessModel(self, target: str, prediction: str, nominal: bool = True, event: str = '', **kwargs):
"""
This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner.
Not all datasets can be assessed. This is designed for scored data that includes a target and prediction columns
TODO: add code example of build, score, and then assess
:param target: string that represents the target variable in the data
:param prediction: string that represents the numeric prediction column in the data. For nominal targets this should a probability between (0,1).
:param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different.
:param event: string which indicates which value of the nominal target variable is the event vs non-event
:param kwargs:
:return: SAS result object
"""
# submit autocall macro
self.sas.submit("%aamodel;")
objtype = "datastep"
objname = '{s:{c}^{n}}'.format(s=self.table[:3], n=3,
c='_') + self.sas._objcnt() # translate to a libname so needs to be less than 8
code = "%macro proccall(d);\n"
# build parameters
score_table = str(self.libref + '.' + self.table)
binstats = str(objname + '.' + "ASSESSMENTSTATISTICS")
out = str(objname + '.' + "ASSESSMENTBINSTATISTICS")
level = 'interval'
# var = 'P_' + target
if nominal:
level = 'class'
            # the user didn't specify the event for a nominal target. Give them the possible choices
try:
if len(event) < 1:
raise Exception(event)
except Exception:
print("No event was specified for a nominal target. Here are possible options:\n")
event_code = "proc hpdmdb data=%s.%s %s classout=work._DMDBCLASSTARGET(keep=name nraw craw level frequency nmisspercent);" % (
self.libref, self.table, self._dsopts())
event_code += "\nclass %s ; \nrun;" % target
event_code += "data _null_; set work._DMDBCLASSTARGET; where ^(NRAW eq . and CRAW eq '') and lowcase(name)=lowcase('%s');" % target
ec = self.sas._io.submit(event_code)
HTML(ec['LST'])
            # TODO: Finish output of the list of nominal variables
if nominal:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s, EVENT=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out, event)
else:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out)
rename_char = """
data {0};
set {0};
if level in ("INTERVAL", "INT") then do;
rename _sse_ = SumSquaredError
                        _div_ = Divisor
_ASE_ = AverageSquaredError
_RASE_ = RootAverageSquaredError
_MEANP_ = MeanPredictionValue
_STDP_ = StandardDeviationPrediction
_CVP_ = CoefficientVariationPrediction;
end;
else do;
rename CR = MaxClassificationRate
KSCut = KSCutOff
CRDEPTH = MaxClassificationDepth
MDepth = MedianClassificationDepth
MCut = MedianEventDetectionCutOff
CCut = ClassificationCutOff
_misc_ = MisClassificationRate;
end;
run;
"""
code += rename_char.format(binstats)
if nominal:
# TODO: add graphics code here to return to the SAS results object
graphics ="""
ODS PROCLABEL='ERRORPLOT' ;
proc sgplot data={0};
title "Error and Correct rate by Depth";
series x=depth y=correct_rate;
series x=depth y=error_rate;
yaxis label="Percentage" grid;
run;
/* roc chart */
ODS PROCLABEL='ROCPLOT' ;
proc sgplot data={0};
title "ROC Curve";
series x=one_minus_specificity y=sensitivity;
yaxis grid;
run;
/* Lift and Cumulative Lift */
ODS PROCLABEL='LIFTPLOT' ;
proc sgplot data={0};
Title "Lift and Cumulative Lift";
series x=depth y=c_lift;
series x=depth y=lift;
yaxis grid;
run;
"""
code += graphics.format(out)
code += "run; quit; %mend;\n"
code += "%%mangobj(%s,%s,%s);" % (objname, objtype, self.table)
if self.sas.nosub:
print(code)
return
ll = self.sas.submit(code, 'text')
obj1 = sp2.SASProcCommons._objectmethods(self, objname)
return sp2.SASresults(obj1, self.sas, objname, self.sas.nosub, ll['LOG']) | 0.003575 |
def sampling_query(sql, fields=None, count=5, sampling=None):
"""Returns a sampling query for the SQL object.
Args:
sql: the SQL object to sample
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
Returns:
A SQL query string for sampling the input sql.
"""
if sampling is None:
sampling = Sampling.default(count=count, fields=fields)
return sampling(sql) | 0.00339 |
def information_content(self):
"""Return the total information content of the motif.
Return
------
ic : float
Motif information content.
"""
ic = 0
for row in self.pwm:
ic += 2.0 + np.sum([row[x] * log(row[x])/log(2) for x in range(4) if row[x] > 0])
return ic | 0.008596 |
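A minimal standalone sketch of the same computation, for a PWM given as plain lists of A/C/G/T probabilities (the function and data below are illustrative, not part of the original class):
import numpy as np
from math import log

def pwm_information_content(pwm):
    # Each position contributes 2 bits minus its entropy in bits.
    ic = 0.0
    for row in pwm:
        ic += 2.0 + np.sum([p * log(p) / log(2) for p in row if p > 0])
    return ic

# A fully conserved position adds 2 bits, a uniform position adds 0:
print(pwm_information_content([[1.0, 0.0, 0.0, 0.0],
                               [0.25, 0.25, 0.25, 0.25]]))  # 2.0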
def canFetchMore(self, parentIndex):
""" Returns true if there is more data available for parent; otherwise returns false.
"""
parentItem = self.getItem(parentIndex)
if not parentItem:
return False
return parentItem.canFetchChildren() | 0.010453 |
def ints(l, ifilter=lambda x: x, idescr=None):
""" Parses a comma-separated list of ints. """
if isinstance(l, string_types):
if l[0] == '[' and l[-1] == ']':
l = l[1:-1]
l = list(map(lambda x: x.strip(), l.split(',')))
try:
l = list(map(ifilter, list(map(int, l))))
except:
raise ValueError("Bad list of {}integers"
.format("" if idescr is None else idescr + " "))
return l | 0.012959 |
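A hedged usage sketch for the parser above (assumes `string_types` covers `str`):
print(ints("[1, 2, 3]"))                         # [1, 2, 3]
print(ints("4, 5, 6", ifilter=lambda x: x % 7))  # [4, 5, 6]
try:
    ints("a, b", idescr="port")
except ValueError as err:
    print(err)                                   # Bad list of port integers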
def add_column(self, table, name='ID', data_type='int(11)', after_col=None, null=False, primary_key=False):
"""Add a column to an existing table."""
location = 'AFTER {0}'.format(after_col) if after_col else 'FIRST'
null_ = 'NULL' if null else 'NOT NULL'
comment = "COMMENT 'Column auto created by mysql-toolkit'"
pk = 'AUTO_INCREMENT PRIMARY KEY {0}'.format(comment) if primary_key else ''
query = 'ALTER TABLE {0} ADD COLUMN {1} {2} {3} {4} {5}'.format(wrap(table), name, data_type, null_, pk,
location)
self.execute(query)
self._printer("\tAdded column '{0}' to '{1}' {2}".format(name, table, '(Primary Key)' if primary_key else ''))
return name | 0.008838 |
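Illustrative call, assuming `db` is a connected instance of the class above and that `wrap()` back-quotes the table name (both are assumptions not shown in this excerpt):
db.add_column('customers', name='ID', data_type='int(11)', primary_key=True)
# Builds and executes roughly:
# ALTER TABLE `customers` ADD COLUMN ID int(11) NOT NULL
#   AUTO_INCREMENT PRIMARY KEY COMMENT 'Column auto created by mysql-toolkit' FIRST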
def closest(xarr, val):
""" Return the index of the closest in xarr to value val """
idx_closest = np.argmin(np.abs(np.array(xarr) - val))
return idx_closest | 0.005917 |
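Quick example of the nearest-index lookup above:
print(closest([1.0, 2.0, 3.0, 4.0, 5.0], 3.7))  # 3, since 4.0 is closest to 3.7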
def _find_usage_api_keys(self):
"""
Find usage on API Keys.
Update `self.limits`.
"""
logger.debug('Finding usage for API Keys')
key_count = 0
paginator = self.conn.get_paginator('get_api_keys')
for resp in paginator.paginate():
key_count += len(resp['items'])
self.limits['API keys per account']._add_current_usage(
key_count, aws_type='AWS::ApiGateway::ApiKey'
) | 0.004274 |
def set_data_type(self, data_type):
"""Set the data type for ths data point
The data type is actually associated with the stream itself and should
not (generally) vary on a point-per-point basis. That being said, if
creating a new stream by writing a datapoint, it may be beneficial to
include this information.
The data type provided should be in the set of available data types of
{ INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }.
"""
validate_type(data_type, type(None), *six.string_types)
if isinstance(data_type, *six.string_types):
data_type = str(data_type).upper()
if not data_type in ({None} | set(DSTREAM_TYPE_MAP.keys())):
raise ValueError("Provided data type not in available set of types")
self._data_type = data_type | 0.004635 |
def get_keys(self, bucket, timeout=None):
"""
Lists all keys within a bucket.
"""
msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ
codec = self._get_codec(msg_code)
stream = self.stream_keys(bucket, timeout=timeout)
return codec.decode_get_keys(stream) | 0.006452 |
def _find_weektime(datetime, time_type='min'):
"""
    Finds the minutes/seconds away from midnight between Sunday and Monday.
Parameters
----------
datetime : datetime
The date and time that needs to be converted.
time_type : 'min' or 'sec'
States whether the time difference should be specified in seconds or minutes.
"""
if time_type == 'sec':
return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second
elif time_type == 'min':
return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute
else:
raise ValueError("Invalid time type specified.") | 0.005814 |
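Worked example: 2021-01-06 14:30 falls on a Wednesday (weekday 2), so the offset from Monday midnight is 2*24*60 + 14*60 + 30 minutes:
from datetime import datetime

print(_find_weektime(datetime(2021, 1, 6, 14, 30)))         # 3750 (minutes)
print(_find_weektime(datetime(2021, 1, 6, 14, 30), 'sec'))  # 225000 (seconds)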
def get_ioos_def(self, ident, elem_type, ont):
"""Gets a definition given an identifier and where to search for it"""
if elem_type == "identifier":
getter_fn = self.system.get_identifiers_by_name
elif elem_type == "classifier":
getter_fn = self.system.get_classifiers_by_name
else:
raise ValueError("Unknown element type '{}'".format(elem_type))
return DescribeSensor.get_named_by_definition(
getter_fn(ident), urljoin(ont, ident)
) | 0.003788 |
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always show memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based in column dtype and number of rows assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, specially
useful for big DataFrames and fine-tune memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not nesc None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(counts.items())]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines) | 0.000315 |
def show_window_option(self, option, g=False):
"""
Return a list of options for the window.
todo: test and return True/False for on/off string
Parameters
----------
option : str
g : bool, optional
Pass ``-g`` flag, global. Default False.
Returns
-------
str, int
Raises
------
:exc:`exc.OptionError`, :exc:`exc.UnknownOption`,
:exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
"""
tmux_args = tuple()
if g:
tmux_args += ('-g',)
tmux_args += (option,)
cmd = self.cmd('show-window-options', *tmux_args)
if len(cmd.stderr):
handle_option_error(cmd.stderr[0])
if not len(cmd.stdout):
return None
option = [shlex.split(item) for item in cmd.stdout][0]
if option[1].isdigit():
option = (option[0], int(option[1]))
return option[1] | 0.002016 |
def to_df(self):
'''Conversion method to Pandas DataFrame. To be attached to ResultDict.
Returns
-------
main_effect, inter_effect: tuple
A tuple of DataFrames for main effects and interaction effects.
The second element (for interactions) will be `None` if not available.
'''
names = self['names']
main_effect = self['ME']
interactions = self.get('IE', None)
inter_effect = None
if interactions:
interaction_names = self.get('interaction_names')
names = [name for name in names if not isinstance(name, list)]
inter_effect = pd.DataFrame({'IE': interactions},
index=interaction_names)
main_effect = pd.DataFrame({'ME': main_effect}, index=names)
return main_effect, inter_effect | 0.001241 |
def _process_args(args, kwargs, prefer_local=True, recurse=True):
"""Select local or remote execution and prepare arguments accordingly.
Assumes any remote args have already been moved to a common engine.
Local execution will be chosen if:
- all args are ordinary objects or Remote instances on the local engine; or
- the local cache of all remote args is current, and prefer_local is True.
Otherwise, remote execution will be chosen.
For remote execution, replaces any remote arg with its Id.
For local execution, replaces any remote arg with its locally cached object
Any arguments or kwargs that are Sequences will be recursed one level deep.
Args:
args (list)
kwargs (dict)
      prefer_local (bool, optional): Whether cached local results are preferred
if available, instead of returning Remote objects. Default is True.
"""
this_engine = distob.engine.eid
local_args = []
remote_args = []
execloc = this_engine # the chosen engine id for execution of the call
for a in args:
id = None
if isinstance(a, Remote):
id = a._ref.id
elif isinstance(a, Ref):
id = a.id
elif isinstance(a, Id):
id = a
if id is not None:
if id.engine is this_engine:
local_args.append(distob.engine[id])
remote_args.append(distob.engine[id])
else:
if (prefer_local and isinstance(a, Remote) and
a._obcache_current):
local_args.append(a._obcache)
remote_args.append(id)
else:
# will choose remote execution
if execloc is not this_engine and id.engine is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
else:
execloc = id.engine
local_args.append(None)
remote_args.append(id)
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types) and recurse):
eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)
if eid is not this_engine:
if execloc is not this_engine and eid is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
execloc = eid
local_args.append(ls)
remote_args.append(ls)
else:
# argument is an ordinary object
local_args.append(a)
remote_args.append(a)
local_kwargs = dict()
remote_kwargs = dict()
for k, a in kwargs.items():
id = None
if isinstance(a, Remote):
id = a._ref.id
elif isinstance(a, Ref):
id = a.id
elif isinstance(a, Id):
id = a
if id is not None:
if id.engine is this_engine:
local_kwargs[k] = distob.engine[id]
remote_kwargs[k] = distob.engine[id]
else:
if (prefer_local and isinstance(a, Remote) and
a._obcache_current):
local_kwargs[k] = a._obcache
remote_kwargs[k] = id
else:
# will choose remote execution
if execloc is not this_engine and id.engine is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
else:
execloc = id.engine
local_kwargs[k] = None
remote_kwargs[k] = id
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types) and recurse):
eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)
if eid is not this_engine:
if execloc is not this_engine and eid is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
execloc = eid
local_kwargs[k] = ls
remote_kwargs[k] = ls
else:
# argument is an ordinary object
local_kwargs[k] = a
remote_kwargs[k] = a
if execloc is this_engine:
return execloc, tuple(local_args), local_kwargs
else:
return execloc, tuple(remote_args), remote_kwargs | 0.000861 |
def _all(cls, verb):
"""
    Return the columns of the verb's data that are not grouping columns.
"""
groups = set(_get_groups(verb))
return [col for col in verb.data if col not in groups] | 0.012346 |
def pipe(engine, format, data, renderer=None, formatter=None, quiet=False):
"""Return ``data`` piped through Graphviz ``engine`` into ``format``.
Args:
        engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...).
format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
data: The binary (encoded) DOT source string to render.
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
quiet (bool): Suppress ``stderr`` output.
Returns:
Binary (encoded) stdout of the layout command.
Raises:
ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
"""
cmd, _ = command(engine, format, None, renderer, formatter)
out, _ = run(cmd, input=data, capture_output=True, check=True, quiet=quiet)
return out | 0.005838 |
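A minimal usage sketch of the function above; it assumes the Graphviz executables are installed and on the PATH:
svg_bytes = pipe('dot', 'svg', b'graph { a -- b }')
print(svg_bytes[:5])  # b'<?xml' -- the rendered SVG document as bytes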
def commandfactory(cmdline, mode='global'):
"""
parses `cmdline` and constructs a :class:`Command`.
:param cmdline: command line to interpret
:type cmdline: str
:param mode: mode identifier
:type mode: str
"""
# split commandname and parameters
if not cmdline:
return None
logging.debug('mode:%s got commandline "%s"', mode, cmdline)
# allow to shellescape without a space after '!'
if cmdline.startswith('!'):
cmdline = 'shellescape \'%s\'' % cmdline[1:]
cmdline = re.sub(r'"(.*)"', r'"\\"\1\\""', cmdline)
try:
args = split_commandstring(cmdline)
except ValueError as e:
raise CommandParseError(str(e))
args = [string_decode(x, 'utf-8') for x in args]
logging.debug('ARGS: %s', args)
cmdname = args[0]
args = args[1:]
# unfold aliases
# TODO: read from settingsmanager
# get class, argparser and forced parameter
(cmdclass, parser, forcedparms) = lookup_command(cmdname, mode)
if cmdclass is None:
msg = 'unknown command: %s' % cmdname
logging.debug(msg)
raise CommandParseError(msg)
parms = vars(parser.parse_args(args))
parms.update(forcedparms)
logging.debug('cmd parms %s', parms)
# create Command
cmd = cmdclass(**parms)
# set pre and post command hooks
get_hook = settings.get_hook
cmd.prehook = get_hook('pre_%s_%s' % (mode, cmdname)) or \
get_hook('pre_global_%s' % cmdname)
cmd.posthook = get_hook('post_%s_%s' % (mode, cmdname)) or \
get_hook('post_global_%s' % cmdname)
return cmd | 0.000622 |
def to_unicode(text, charset=None):
"""Convert a `str` object to an `unicode` object.
If `charset` is given, we simply assume that encoding for the text,
but we'll use the "replace" mode so that the decoding will always
succeed.
If `charset` is ''not'' specified, we'll make some guesses, first
trying the UTF-8 encoding, then trying the locale preferred encoding,
in "replace" mode. This differs from the `unicode` builtin, which
by default uses the locale preferred encoding, in 'strict' mode,
    and is therefore prone to raising `UnicodeDecodeError`s.
Because of the "replace" mode, the original content might be altered.
If this is not what is wanted, one could map the original byte content
by using an encoding which maps each byte of the input to an unicode
character, e.g. by doing `unicode(text, 'iso-8859-1')`.
"""
if not isinstance(text, str):
if isinstance(text, Exception):
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text)
if charset:
return unicode(text, charset, 'replace')
else:
try:
return unicode(text, 'utf-8')
except UnicodeError:
return unicode(text, locale.getpreferredencoding(), 'replace') | 0.022269 |
def is_username_valid(username):
"""
    Check if the given string is a valid username.
valid:
oracle
bill-gates
steve.jobs
micro_soft
not valid
Bill Gates - no space allowed
[email protected] - @ is not a valid character
:param username: string
    :return: True if the username is valid, False otherwise
"""
pattern = re.compile(r"^[a-zA-Z0-9_.-]+$")
return bool(pattern.match(username)) | 0.002564 |
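Quick check against the examples listed in the docstring:
for name in ("oracle", "bill-gates", "steve.jobs", "micro_soft",
             "Bill Gates", "[email protected]"):
    print(name, is_username_valid(name))
# The first four print True; the space and the '@' make the last two False.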
def on_select_mean_type_box(self, event):
"""
        set parent Zeq_GUI to reflect the change in this box and update its mean type accordingly
@param: event -> the wx.ComboBoxEvent that triggered this function
"""
new_mean_type = self.mean_type_box.GetValue()
if new_mean_type == "None":
self.parent.clear_high_level_pars()
self.parent.mean_type_box.SetStringSelection(new_mean_type)
self.parent.onSelect_mean_type_box(event) | 0.004274 |
def getEmpTraitCovar(self):
"""
Returns the empirical trait covariance matrix
"""
if self.P==1:
out=self.Y[self.Iok].var()
else:
out=SP.cov(self.Y[self.Iok].T)
return out | 0.020661 |
def get_local_dist_packages_dir():
"""
    Attempts to work around virtualenvs and find the system dist-packages.
    Essentially this is implemented as a lookup table
"""
import utool as ut
if not ut.in_virtual_env():
# Non venv case
return get_site_packages_dir()
else:
candidates = []
if ut.LINUX:
candidates += [
'/usr/local/lib/python2.7/dist-packages',
]
else:
raise NotImplementedError()
for path in candidates:
if ut.checkpath(path):
return path | 0.001664 |
def genes_by_alias(hgnc_genes):
"""Return a dictionary with hgnc symbols as keys
Value of the dictionaries are information about the hgnc ids for a symbol.
If the symbol is primary for a gene then 'true_id' will exist.
A list of hgnc ids that the symbol points to is in ids.
Args:
hgnc_genes(dict): a dictionary with hgnc_id as key and gene info as value
Returns:
alias_genes(dict):
{
'hgnc_symbol':{
'true_id': int,
'ids': list(int)
}
}
"""
alias_genes = {}
for hgnc_id in hgnc_genes:
gene = hgnc_genes[hgnc_id]
# This is the primary symbol:
hgnc_symbol = gene['hgnc_symbol']
for alias in gene['previous_symbols']:
true_id = None
if alias == hgnc_symbol:
true_id = hgnc_id
if alias in alias_genes:
alias_genes[alias.upper()]['ids'].add(hgnc_id)
if true_id:
alias_genes[alias.upper()]['true_id'] = hgnc_id
else:
alias_genes[alias.upper()] = {
                    'true_id': true_id,
'ids': set([hgnc_id])
}
return alias_genes | 0.001559 |
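A minimal illustration with hypothetical ids and symbols (field names follow the docstring). Note that only symbols listed under 'previous_symbols' are indexed, so the primary symbol is included in that list here:
hgnc_genes = {
    7: {'hgnc_symbol': 'GENE1', 'previous_symbols': ['GENE1', 'OLDNAME']},
}
print(genes_by_alias(hgnc_genes))
# {'GENE1': {'true_id': 7, 'ids': {7}}, 'OLDNAME': {'true_id': None, 'ids': {7}}}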
def targets_format(self, value):
"""
Setter for **self.__targets_format** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"targets_format", value)
assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format("targets_format", value)
self.__targets_format = value | 0.007843 |
def extract_code(obj, compile_mode):
"""
Generic function for converting objects into instances of `CodeType`.
"""
try:
code = obj.__code__
if isinstance(code, CodeType):
return code
raise ValueError(
"{obj} has a `__code__` attribute, "
"but it's an instance of {notcode!r}, not CodeType.".format(
obj=obj,
notcode=type(code).__name__,
)
)
except AttributeError:
raise ValueError("Don't know how to extract code from %s." % obj) | 0.001757 |
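Usage sketch: pulling the code object out of an ordinary function (`compile_mode` is accepted but unused in this particular branch):
def add(a, b):
    return a + b

code = extract_code(add, compile_mode='exec')
print(code.co_varnames)  # ('a', 'b')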
def _get_pooling_layers(self, start_node_id, end_node_id):
"""Given two node IDs, return all the pooling layers between them."""
layer_list = []
node_list = [start_node_id]
assert self._depth_first_search(end_node_id, layer_list, node_list)
ret = []
for layer_id in layer_list:
layer = self.layer_list[layer_id]
if is_layer(layer, "Pooling"):
ret.append(layer)
elif is_layer(layer, "Conv") and layer.stride != 1:
ret.append(layer)
return ret | 0.00354 |
def es_query_proto(path, selects, wheres, schema):
"""
    RETURN AN ES QUERY TEMPLATE FOR THE GIVEN NESTED PATH
    :param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
    :param selects: MAP FROM path TO SELECT CLAUSES
    :param wheres: MAP FROM path TO LIST OF WHERE CONDITIONS
    :return: es_query
"""
output = None
last_where = MATCH_ALL
for p in reversed(sorted( wheres.keys() | set(selects.keys()))):
where = wheres.get(p)
select = selects.get(p)
if where:
where = AndOp(where).partial_eval().to_esfilter(schema)
if output:
where = es_or([es_and([output, where]), where])
else:
if output:
if last_where is MATCH_ALL:
where = es_or([output, MATCH_ALL])
else:
where = output
else:
where = MATCH_ALL
if p == ".":
output = set_default(
{
"from": 0,
"size": 0,
"sort": [],
"query": where
},
select.to_es()
)
else:
output = {"nested": {
"path": p,
"inner_hits": set_default({"size": 100000}, select.to_es()) if select else None,
"query": where
}}
last_where = where
return output | 0.002131 |
def compose_projects_json(projects, data):
""" Compose projects.json with all data sources
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with all data sources
"""
projects = compose_git(projects, data)
projects = compose_mailing_lists(projects, data)
projects = compose_bugzilla(projects, data)
projects = compose_github(projects, data)
projects = compose_gerrit(projects)
projects = compose_mbox(projects)
return projects | 0.001976 |
def log_call(self, cmd, callwith=subprocess.check_call,
log_level=logging.DEBUG, **kw):
"""Wrap a subprocess call with logging
        :param callwith: the calling method to use.
"""
logger.log(log_level, "%s> call %r", self.cwd, cmd)
ret = callwith(cmd, **kw)
if callwith == subprocess.check_output:
ret = console_to_str(ret)
return ret | 0.007317 |
def start_log(level=logging.DEBUG, filename=None):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP
"""
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started') | 0.001229 |
def set(self, val):
"""
set value of this param
"""
assert not self.__isReadOnly, \
("This parameter(%s) was locked"
" and now it can not be changed" % self.name)
assert self.replacedWith is None, \
("This param was replaced with new one and this "
"should not exists")
val = toHVal(val)
self.defVal = val
self._val = val.staticEval()
self._dtype = self._val._dtype | 0.004098 |
def connect_edges(graph):
"""
Given a Graph element containing abstract edges compute edge
segments directly connecting the source and target nodes. This
operation just uses internal HoloViews operations and will be a
lot slower than the pandas equivalent.
"""
paths = []
for start, end in graph.array(graph.kdims):
start_ds = graph.nodes[:, :, start]
end_ds = graph.nodes[:, :, end]
if not len(start_ds) or not len(end_ds):
raise ValueError('Could not find node positions for all edges')
start = start_ds.array(start_ds.kdims[:2])
end = end_ds.array(end_ds.kdims[:2])
paths.append(np.array([start[0], end[0]]))
return paths | 0.001387 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling(C, rup.mag) +
self._get_distance_scaling(C, rup.mag, dists.rhypo))
if imt.name in "SA PGA":
mean = np.log(np.exp(mean) / (100.0 * g))
stddevs = self._compute_std(C, stddev_types, len(dists.rhypo))
return mean, stddevs | 0.002805 |
def get_temp_url(self, container, obj, seconds, method="GET", key=None,
cached=True):
"""
Given a storage object in a container, returns a URL that can be used
to access that object. The URL will expire after `seconds` seconds.
The only methods supported are GET and PUT. Anything else will raise
an `InvalidTemporaryURLMethod` exception.
If you have your Temporary URL key, you can pass it in directly and
potentially save an API call to retrieve it. If you don't pass in the
key, and don't wish to use any cached value, pass `cached=False`.
"""
if not key:
key = self.api.get_temp_url_key(cached=cached)
if not key:
raise exc.MissingTemporaryURLKey("You must set the key for "
"Temporary URLs before you can generate them. This is "
"done via the `set_temp_url_key()` method.")
cname = utils.get_name(container)
oname = utils.get_name(obj)
mod_method = method.upper().strip()
if mod_method not in ("GET", "PUT"):
raise exc.InvalidTemporaryURLMethod("Method must be either 'GET' "
"or 'PUT'; received '%s'." % method)
mgt_url = self.api.management_url
mtch = re.search(r"/v\d/", mgt_url)
start = mtch.start()
base_url = mgt_url[:start]
path_parts = (mgt_url[start:], cname, oname)
cleaned = (part.strip("/\\") for part in path_parts)
pth = "/%s" % "/".join(cleaned)
expires = int(time.time() + int(seconds))
try:
key = key.encode("ascii")
hmac_body = b'\n'.join([
mod_method.encode("ascii"),
six.text_type(expires).encode("ascii"),
pth.encode("ascii")])
except UnicodeEncodeError:
raise exc.UnicodePathError("Due to a bug in Python, the TempURL "
"function only works with ASCII object paths.")
sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
temp_url = "%s%s?temp_url_sig=%s&temp_url_expires=%s" % (base_url, pth,
sig, expires)
return temp_url | 0.003623 |
def push(self, refspec=None, progress=None, **kwargs):
"""Push changes from source branch in refspec to target branch in refspec.
:param refspec: see 'fetch' method
:param progress:
Can take one of many value types:
* None to discard progress information
* A function (callable) that is called with the progress information.
Signature: ``progress(op_code, cur_count, max_count=None, message='')``.
`Click here <http://goo.gl/NPa7st>`_ for a description of all arguments
given to the function.
* An instance of a class derived from ``git.RemoteProgress`` that
overrides the ``update()`` function.
:note: No further progress information is returned after push returns.
:param kwargs: Additional arguments to be passed to git-push
:return:
IterableList(PushInfo, ...) iterable list of PushInfo instances, each
one informing about an individual head which had been updated on the remote
side.
If the push contains rejected heads, these will have the PushInfo.ERROR bit set
in their flags.
If the operation fails completely, the length of the returned IterableList will
be null."""
kwargs = add_progress(kwargs, self.repo.git, progress)
proc = self.repo.git.push(self, refspec, porcelain=True, as_process=True,
universal_newlines=True, **kwargs)
return self._get_push_info(proc, progress) | 0.006958 |
def _get_settings_file(self, imu_settings_file):
"""
Internal. Logic to check for a system wide RTIMU ini file. This is
copied to the home folder if one is not already found there.
"""
ini_file = '%s.ini' % imu_settings_file
home_dir = pwd.getpwuid(os.getuid())[5]
home_path = os.path.join(home_dir, self.SETTINGS_HOME_PATH)
if not os.path.exists(home_path):
os.makedirs(home_path)
home_file = os.path.join(home_path, ini_file)
home_exists = os.path.isfile(home_file)
system_file = os.path.join('/etc', ini_file)
system_exists = os.path.isfile(system_file)
if system_exists and not home_exists:
shutil.copyfile(system_file, home_file)
return RTIMU.Settings(os.path.join(home_path, imu_settings_file)) | 0.002378 |
def _Fierz_to_JMS_III_IV_V(Fqqqq, qqqq):
"""From 4-quark Fierz to JMS basis for Classes III, IV and V.
`qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc."""
F = Fqqqq.copy()
#case dduu
classIII = ['sbuc', 'sbcu', 'dbuc', 'dbcu', 'dsuc', 'dscu']
classVdduu = ['sbuu' , 'dbuu', 'dsuu', 'sbcc' , 'dbcc', 'dscc']
if qqqq in classIII + classVdduu:
f1 = str(dflav[qqqq[0]] + 1)
f2 = str(dflav[qqqq[1]] + 1)
f3 = str(uflav[qqqq[2]] + 1)
f4 = str(uflav[qqqq[3]] + 1)
d = {'V1udLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'] + F['F' + qqqq + '2'] / Nc,
'V8udLL_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2'],
'V1duLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc,
'V8duLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4'],
'S1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc,
'S8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'],
'S1udduRR_' + f3 + f2 + f1 + f4: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'],
'V8udduLR_' + f4 + f1 + f2 + f3: -F['F' + qqqq + '7'].conjugate(),
'V1udduLR_' + f4 + f1 + f2 + f3: -(F['F' + qqqq + '7'].conjugate() / (2 * Nc)) - F['F' + qqqq + '8'].conjugate() / 2,
'S8udduRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'],
'V1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'] + F['F' + qqqq + '2p'] / Nc,
'V8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2p'],
'V1udLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc,
'V8udLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4p'],
'S1udRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc,
'S8udRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(),
'S1udduRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(),
'V8udduLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7p'],
'V1udduLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7p'] / (2 * Nc)) - F['F' + qqqq + '8p'] / 2,
'S8udduRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate(),
}
return symmetrize_JMS_dict(d)
#case uudd
classVuudd = ['ucdd', 'ucss','ucbb']
if qqqq in classVuudd:
f3 = str(uflav[qqqq[0]] + 1)
f4 = str(uflav[qqqq[1]] + 1)
f1 = str(dflav[qqqq[2]] + 1)
f2 = str(dflav[qqqq[3]] + 1)
d = {'V1udLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'] + F['F' + qqqq + '2'] / Nc,
'V8udLL_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2'],
'V1duLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc,
'V8duLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4p'],
'S1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc,
'S8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'],
'S1udduRR_' + f3 + f2 + f1 + f4: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'],
'V8udduLR_' + f4 + f1 + f2 + f3: -F['F' + qqqq + '7p'].conjugate(),
'V1udduLR_' + f4 + f1 + f2 + f3: -(F['F' + qqqq + '7p'].conjugate() / (2 * Nc)) - F['F' + qqqq + '8p'].conjugate() / 2,
'S8udduRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'],
'V1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'] + F['F' + qqqq + '2p'] / Nc,
'V8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2p'],
'V1udLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc,
'V8udLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4'],
'S1udRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc,
'S8udRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(),
'S1udduRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(),
'V8udduLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7'],
'V1udduLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7'] / (2 * Nc)) - F['F' + qqqq + '8'] / 2,
'S8udduRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate(),
}
return symmetrize_JMS_dict(d)
#case dddd
classIV = ['sbsd', 'dbds', 'bsbd']
classVdddd = ['sbss', 'dbdd', 'dsdd', 'sbbb', 'dbbb', 'dsss']
classVddddind = ['sbdd', 'dsbb', 'dbss']
classVuuuu = ['ucuu', 'cucc', 'uccc', 'cuuu']
if qqqq in classVdddd + classIV + classVuuuu:
# if 2nd and 4th or 1st and 3rd fields are the same, Fierz can be used
# to express the even coeffs in terms of the odd ones
for key in F:
# to make sure we're not screwing things up, check that none
# of the even WCs is actually present
assert int(key[5:].replace('p', '')) % 2 == 1, "Unexpected key in Fierz basis: " + key
for p in ['', 'p']:
if qqqq in ['sbbb', 'dbbb', 'dsss', 'uccc']:
F['F' + qqqq + '2' + p] = F['F' + qqqq + '1' + p]
F['F' + qqqq + '4' + p] = -1 / 2 * F['F' + qqqq + '7' + p]
F['F' + qqqq + '6' + p] = -1 / 2 * F['F' + qqqq + '5' + p] - 6 * F['F' + qqqq + '9' + p]
F['F' + qqqq + '8' + p] = -2 * F['F' + qqqq + '3' + p]
F['F' + qqqq + '10' + p] = -1 / 8 * F['F' + qqqq + '5' + p] + 1 / 2 * F['F' + qqqq + '9' + p]
elif qqqq in ['sbss', 'dbdd', 'dsdd', 'sbsd', 'dbds', 'bsbd', 'ucuu']:
notp = 'p' if p == '' else ''
F['F' + qqqq + '2' + p] = F['F' + qqqq + '1' + p]
F['F' + qqqq + '4' + p] = -1 / 2 * F['F' + qqqq + '7' + notp]
F['F' + qqqq + '6' + notp] = -1 / 2 * F['F' + qqqq + '5' + notp] - 6 * F['F' + qqqq + '9' + notp]
F['F' + qqqq + '8' + notp] = -2 * F['F' + qqqq + '3' + p]
F['F' + qqqq + '10' + notp] = -1 / 8 * F['F' + qqqq + '5' + notp] + 1 / 2 * F['F' + qqqq + '9' + notp]
if qqqq in classIV + classVdddd + classVddddind:
f1 = str(dflav[qqqq[0]] + 1)
f2 = str(dflav[qqqq[1]] + 1)
f3 = str(dflav[qqqq[2]] + 1)
f4 = str(dflav[qqqq[3]] + 1)
d = {
'VddLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'],
'VddLL_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2'],
'V1ddLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc,
'V8ddLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4'],
'S1ddRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc,
'S8ddRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'],
'V8ddLR_' + f1 + f4 + f3 + f2: -F['F' + qqqq + '7'],
'V1ddLR_' + f1 + f4 + f3 + f2: -(F['F' + qqqq + '7'] / (2 * Nc)) - F['F' + qqqq + '8'] / 2,
'S1ddRR_' + f1 + f4 + f3 + f2: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'],
'S8ddRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'],
'VddRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'],
'VddRR_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2p'],
'V1ddLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc,
'V8ddLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4p'],
'S1ddRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc,
'S8ddRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(),
'V8ddLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7p'],
'V1ddLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7p'] / (2 * Nc)) - F['F' + qqqq + '8p'] / 2,
'S1ddRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(),
'S8ddRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate(),
}
return symmetrize_JMS_dict(d)
#case uuuu
if qqqq in classVuuuu:
f1 = str(uflav[qqqq[0]] + 1)
f2 = str(uflav[qqqq[1]] + 1)
f3 = str(uflav[qqqq[2]] + 1)
f4 = str(uflav[qqqq[3]] + 1)
d = {
'VuuLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'],
'VuuLL_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2'],
'V1uuLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc,
'V8uuLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4'],
'S1uuRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc,
'S8uuRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'],
'V8uuLR_' + f1 + f4 + f3 + f2: -F['F' + qqqq + '7'],
'V1uuLR_' + f1 + f4 + f3 + f2: -(F['F' + qqqq + '7'] / (2 * Nc)) - F['F' + qqqq + '8'] / 2,
'S1uuRR_' + f1 + f4 + f3 + f2: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'],
'S8uuRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'],
'VuuRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'],
'VuuRR_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2p'],
'V1uuLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc,
'V8uuLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4p'],
'S1uuRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc,
'S8uuRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(),
'V8uuLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7p'],
'V1uuLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7p'] / (2 * Nc)) - F['F' + qqqq + '8p'] / 2,
'S1uuRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(),
'S8uuRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate()
}
return symmetrize_JMS_dict(d)
raise ValueError("Case not implemented: {}".format(qqqq)) | 0.013565 |
def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs):
"""
Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single
statement and a sequence of parameters. Each item in ``parameters``
should be a sequence or :const:`None`.
Example usage::
statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)")
parameters = [(x,) for x in range(1000)]
execute_concurrent_with_args(session, statement, parameters, concurrency=50)
"""
return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs) | 0.00641 |
def get_list_url_filtered_by_field_value(view, model, name, reverse=False):
"""Get the URL if a filter of model[name] value was appended.
This allows programatically adding filters. This is used in the specialized case
of filtering deeper into a list by a field's value.
For instance, since there can be multiple assignments in a list of handins. The
assignment column can have a URL generated by get_filter_url to filter the handins
to show only ones for that assignment.
Parameters
----------
view : View instance
model : document (model instance, not the class itself)
name : field name
reverse : bool
Whether to *remove* an applied filter from url
Returns
-------
string : URL of current list args + filtering on field value
"""
view_args = view._get_list_extra_args()
def create_filter_arg(field_name, value):
i, flt = next(
(
v
for k, v in view._filter_args.items()
if k == '{}_equals'.format(field_name)
),
None,
)
return (i, flt.name, value)
new_filter = create_filter_arg(name, model[name])
filters = view_args.filters
if new_filter in view_args.filters: # Filter already applied
if not reverse:
return None
else: # Remove filter
filters.remove(new_filter)
if not reverse: # Add Filter
filters.append(new_filter)
# Example of an activated filter: (u'view_args.filters', [(7, u'Path', u'course')])
return view._get_list_url(
view_args.clone(filters=filters, page=0) # Reset page to 0
) | 0.002978 |
def extendMarkdown(self, md, md_globals):
"""
        Every extension requires an extendMarkdown method to tell the markdown
        renderer how to use the extension.
"""
md.registerExtension(self)
for processor in (self.preprocessors or []):
md.preprocessors.add(processor.__name__.lower(), processor(md), '_end')
for pattern in (self.inlinepatterns or []):
md.inlinePatterns.add(pattern.__name__.lower(), pattern(md), '_end')
for processor in (self.postprocessors or []):
md.postprocessors.add(processor.__name__.lower(), processor(md), '_end') | 0.007937 |
def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors,
config, weights={}):
"""
Initializes an MpsGraphAPI for object detection.
"""
network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
c_in, h_in, w_in = input_image_shape
c_out = output_size
h_out = h_in // 32
w_out = w_in // 32
c_view = c_in
h_view = h_in
w_view = w_in
network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out,
weights=weights, config=config)
return network | 0.003578 |
def post(self, request, pk=None):
""" Handles POST requests. """
self.top_level_forum = get_object_or_404(Forum, pk=pk) if pk else None
return self.mark_as_read(request, pk) | 0.010152 |
def project_to_image(self, point_cloud, round_px=True):
"""Projects a point cloud onto the camera image plane and creates
a depth image. Zero depth means no point projected into the camera
at that pixel location (i.e. infinite depth).
Parameters
----------
point_cloud : :obj:`autolab_core.PointCloud` or :obj:`autolab_core.Point`
A PointCloud or Point to project onto the camera image plane.
round_px : bool
If True, projections are rounded to the nearest pixel.
Returns
-------
:obj:`DepthImage`
A DepthImage generated from projecting the point cloud into the
camera.
Raises
------
ValueError
If the input is not a PointCloud or Point in the same reference
frame as the camera.
"""
if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):
raise ValueError('Must provide PointCloud or 3D Point object for projection')
if point_cloud.frame != self._frame:
raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))
points_proj = self.S.dot(point_cloud.data) + self.t
if len(points_proj.shape) == 1:
points_proj = points_proj[:, np.newaxis]
point_depths = points_proj[2,:]
point_z = np.tile(point_depths, [3, 1])
points_proj = np.divide(points_proj, point_z)
if round_px:
points_proj = np.round(points_proj)
points_proj = points_proj[:2,:].astype(np.int16)
valid_ind = np.where((points_proj[0,:] >= 0) & \
(points_proj[1,:] >= 0) & \
(points_proj[0,:] < self.width) & \
(points_proj[1,:] < self.height))[0]
depth_data = np.zeros([self.height, self.width])
depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind]
return DepthImage(depth_data, frame=self.frame) | 0.008924 |
def get_urlpatterns(self):
""" Returns the URL patterns managed by the considered factory / application. """
return [
url(r'', include(self.forum_urlpatterns_factory.urlpatterns)),
url(r'', include(self.conversation_urlpatterns_factory.urlpatterns)),
url(_(r'^feeds/'), include(self.feeds_urlpatterns_factory.urlpatterns)),
url(_(r'^member/'), include(self.member_urlpatterns_factory.urlpatterns)),
url(_(r'^moderation/'), include(self.moderation_urlpatterns_factory.urlpatterns)),
url(_(r'^search/'), include(self.search_urlpatterns_factory.urlpatterns)),
url(_(r'^tracking/'), include(self.tracking_urlpatterns_factory.urlpatterns)),
] | 0.012081 |
def search(name, official=False, trusted=False):
'''
Searches the registry for an image
name
Search keyword
official : False
Limit results to official builds
trusted : False
Limit results to `trusted builds`_
**RETURN DATA**
A dictionary with each key being the name of an image, and the following
information for each image:
- ``Description`` - Image description
- ``Official`` - A boolean (``True`` if an official build, ``False`` if
not)
- ``Stars`` - Number of stars the image has on the registry
- ``Trusted`` - A boolean (``True`` if a trusted build, ``False`` if not)
CLI Example:
.. code-block:: bash
salt myminion docker.search centos
salt myminion docker.search centos official=True
'''
response = _client_wrapper('search', name)
if not response:
raise CommandExecutionError(
'No images matched the search string \'{0}\''.format(name)
)
key_map = {
'description': 'Description',
'is_official': 'Official',
'is_trusted': 'Trusted',
'star_count': 'Stars'
}
limit = []
if official:
limit.append('Official')
if trusted:
limit.append('Trusted')
results = {}
for item in response:
c_name = item.pop('name', None)
if c_name is not None:
for key in item:
mapped_key = key_map.get(key, key)
results.setdefault(c_name, {})[mapped_key] = item[key]
if not limit:
return results
ret = {}
for key, val in six.iteritems(results):
for item in limit:
if val.get(item, False):
ret[key] = val
break
return ret | 0.000566 |
async def get_pypi_version(self):
"""Get version published to PyPi."""
self._version_data["beta"] = self.beta
self._version_data["source"] = "PyPi"
info_version = None
last_release = None
try:
async with async_timeout.timeout(5, loop=self.loop):
response = await self.session.get(URL["pypi"])
data = await response.json()
info_version = data["info"]["version"]
releases = data["releases"]
for version in sorted(releases, reverse=True):
                    if re.search(r"^(\d+\.)?(\d\.)?(\*|\d+)$", version):
continue
else:
last_release = version
break
self._version = info_version
if self.beta:
if info_version in last_release:
self._version = info_version
else:
self._version = last_release
_LOGGER.debug("Version: %s", self.version)
_LOGGER.debug("Version data: %s", self.version_data)
except asyncio.TimeoutError as error:
_LOGGER.error("Timeouterror fetching version information from PyPi")
except KeyError as error:
_LOGGER.error("Error parsing version information from PyPi, %s", error)
except TypeError as error:
_LOGGER.error("Error parsing version information from PyPi, %s", error)
except aiohttp.ClientError as error:
_LOGGER.error("Error fetching version information from PyPi, %s", error)
except socket.gaierror as error:
_LOGGER.error("Error fetching version information from PyPi, %s", error)
except Exception as error: # pylint: disable=broad-except
_LOGGER.critical("Something really wrong happend! - %s", error) | 0.003723 |
def check_node_attributes(pattern, node, *attributes):
"""
Searches match in attributes against given pattern and if
finds the match against any of them returns True.
"""
for attribute_name in attributes:
attribute = node.get(attribute_name)
if attribute is not None and pattern.search(attribute):
return True
return False | 0.002667 |
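A hedged sketch using a stand-in for an lxml-style node; anything exposing .get(attribute_name) works (FakeNode is hypothetical):
import re

class FakeNode:
    def __init__(self, attrib):
        self.attrib = attrib
    def get(self, name):
        return self.attrib.get(name)

pattern = re.compile(r'comment|footer', re.IGNORECASE)
node = FakeNode({'class': 'article-footer', 'id': 'main'})
print(check_node_attributes(pattern, node, 'class', 'id'))  # True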
def retrieve(self, request, *args, **kwargs):
"""
        Optional `field` query parameter (can be a list) allows limiting which fields are returned.
For example, given request /api/projects/<uuid>/?field=uuid&field=name you get response like this:
.. code-block:: javascript
{
"uuid": "90bcfe38b0124c9bbdadd617b5d739f5",
"name": "Default"
}
"""
return super(ProjectViewSet, self).retrieve(request, *args, **kwargs) | 0.007843 |
def callback(self, username, request):
""" Having :username: return user's identifiers or None. """
credentials = self._get_credentials(request)
if credentials:
username, api_key = credentials
if self.check:
return self.check(username, api_key, request) | 0.006309 |
def query(self, table_name, hash_key_value, range_key_conditions=None,
attributes_to_get=None, limit=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
object_hook=None):
"""
Perform a query of DynamoDB. This version is currently punting
and expecting you to provide a full and correct JSON body
which is passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to query.
:type hash_key_value: dict
        :param hash_key_value: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specified forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name,
'HashKeyValue': hash_key_value}
if range_key_conditions:
data['RangeKeyCondition'] = range_key_conditions
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if consistent_read:
data['ConsistentRead'] = True
if scan_index_forward:
data['ScanIndexForward'] = True
else:
data['ScanIndexForward'] = False
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Query', json_input,
object_hook=object_hook) | 0.002029 |
def pdfdump(self, filename=None, **kargs):
"""pdfdump(filename=None, layer_shift=0, rebuild=1)
Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called."""
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=".pdf")
canvas.writePDFfile(fname)
subprocess.Popen([conf.prog.pdfreader, fname+".pdf"])
else:
canvas.writePDFfile(filename) | 0.005859 |
def open(self, options):
"""
        Open and include the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
"""
if self.opened:
return
self.opened = True
log.debug('%s, including location="%s"', self.id, self.location)
result = self.download(options)
log.debug('included:\n%s', result)
return result | 0.004016 |