text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def main():
"""
Example to show AIKIF logging of results.
Generates a sequence of random grids and runs the
Game of Life, saving results
"""
iterations = 9 # how many simulations to run
years = 3 # how many times to run each simulation
width = 22 # grid height
height = 78 # grid width
time_delay = 0.03 # delay when printing on screen
lg = mod_log.Log('test')
lg.record_process('Game of Life', 'game_of_life_console.py')
for _ in range(iterations):
s,e = run_game_of_life(years, width, height, time_delay, 'N')
lg.record_result("Started with " + str(s) + " cells and ended with " + str(e) + " cells") | 0.014144 |
def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False):
"""
Gets the result of a method call, using the given key and TTL as a cache
"""
if not recalculate:
cached = cache.get(cache_key)
if cached is not None:
return json.loads(cached)
calculated = calculate()
cache.set(cache_key, json.dumps(calculated), cache_ttl)
return calculated | 0.002457 |
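A minimal usage sketch for get_cacheable, assuming the module-level `cache` is a Django-style cache backend and that the result of `calculate` is JSON-serialisable; the key name and TTL below are illustrative only.

def expensive_report():
    # Stand-in for a slow computation (DB aggregate, remote API call, ...).
    return {"total": 42}
# First call computes the value and stores its JSON form for 300 seconds;
# later calls within the TTL return the cached copy instead.
report = get_cacheable("reports:daily", 300, expensive_report)
# Force a refresh, ignoring whatever is currently cached.
report = get_cacheable("reports:daily", 300, expensive_report, recalculate=True)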
def generate_identity_binding_access_token(
self,
name,
scope,
jwt,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Exchange a JWT signed by third party identity provider to an OAuth 2.0
access token
Example:
>>> from google.cloud import iam_credentials_v1
>>>
>>> client = iam_credentials_v1.IAMCredentialsClient()
>>>
>>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')
>>>
>>> # TODO: Initialize `scope`:
>>> scope = []
>>>
>>> # TODO: Initialize `jwt`:
>>> jwt = ''
>>>
>>> response = client.generate_identity_binding_access_token(name, scope, jwt)
Args:
name (str): The resource name of the service account for which the credentials are
requested, in the following format:
``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``.
scope (list[str]): Code to identify the scopes to be included in the OAuth 2.0 access token.
See https://developers.google.com/identity/protocols/googlescopes for more
information.
At least one value required.
jwt (str): Required. Input token. Must be in JWT format according to RFC7523
(https://tools.ietf.org/html/rfc7523) and must have 'kid' field in the
header. Supported signing algorithms: RS256 (RS512, ES256, ES512 coming
soon). Mandatory payload fields (along the lines of RFC 7523, section
3):
- iss: issuer of the token. Must provide a discovery document at
$iss/.well-known/openid-configuration . The document needs to be
formatted according to section 4.2 of the OpenID Connect Discovery
1.0 specification.
- iat: Issue time in seconds since epoch. Must be in the past.
- exp: Expiration time in seconds since epoch. Must be less than 48
hours after iat. We recommend creating tokens that last less than
6 hours to improve security unless business reasons mandate longer
expiration times. Shorter token lifetimes are generally more secure
since tokens that have been exfiltrated by attackers can be used for
a shorter time. You can configure the maximum lifetime of the
incoming token in the configuration of the mapper. The resulting
Google token will expire within an hour or at "exp", whichever is
earlier.
- sub: JWT subject, identity asserted in the JWT.
- aud: Configured in the mapper policy. By default the service account
email.
Claims from the incoming token can be transferred into the output token
according to the mapper configuration. The outgoing claim size is
limited. Outgoing claims size must be less than 4kB serialized as JSON
without whitespace.
Example header: { "alg": "RS256", "kid":
"92a4265e14ab04d4d228a48d10d4ca31610936f8" } Example payload: { "iss":
"https://accounts.google.com", "iat": 1517963104, "exp": 1517966704,
"aud": "https://iamcredentials.googleapis.com/", "sub":
"113475438248934895348", "my\_claims": { "additional\_claim": "value" }
}
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.iam_credentials_v1.types.GenerateIdentityBindingAccessTokenResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "generate_identity_binding_access_token" not in self._inner_api_calls:
self._inner_api_calls[
"generate_identity_binding_access_token"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.generate_identity_binding_access_token,
default_retry=self._method_configs[
"GenerateIdentityBindingAccessToken"
].retry,
default_timeout=self._method_configs[
"GenerateIdentityBindingAccessToken"
].timeout,
client_info=self._client_info,
)
request = common_pb2.GenerateIdentityBindingAccessTokenRequest(
name=name, scope=scope, jwt=jwt
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["generate_identity_binding_access_token"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.005701 |
def overlay_gateway_access_lists_ipv4_out_ipv4_acl_out_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
access_lists = ET.SubElement(overlay_gateway, "access-lists")
ipv4 = ET.SubElement(access_lists, "ipv4")
out = ET.SubElement(ipv4, "out")
ipv4_acl_out_name = ET.SubElement(out, "ipv4-acl-out-name")
ipv4_acl_out_name.text = kwargs.pop('ipv4_acl_out_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003963 |
def apply_t0(self, hits):
"""Apply only t0s"""
if HAVE_NUMBA:
apply_t0_nb(
hits.time, hits.dom_id, hits.channel_id, self._lookup_tables
)
else:
n = len(hits)
cal = np.empty(n)
lookup = self._calib_by_dom_and_channel
for i in range(n):
calib = lookup[hits['dom_id'][i]][hits['channel_id'][i]]
cal[i] = calib[6]
hits.time += cal
return hits | 0.003984 |
def load_mplstyle():
"""Try to load conf.plot.mplstyle matplotlib style."""
plt = importlib.import_module('matplotlib.pyplot')
if conf.plot.mplstyle:
for style in conf.plot.mplstyle.split():
stfile = config.CONFIG_DIR / (style + '.mplstyle')
if stfile.is_file():
style = str(stfile)
try:
plt.style.use(style)
except OSError:
print('Cannot import style {}.'.format(style),
file=sys.stderr)
conf.plot.mplstyle = ''
if conf.plot.xkcd:
plt.xkcd() | 0.001645 |
def created(self):
"""Union[datetime.datetime, None]: Datetime at which the model was
created (:data:`None` until set from the server).
Read-only.
"""
value = self._proto.creation_time
if value is not None and value != 0:
# value will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(value)
) | 0.004577 |
def set_collections_acl(self):
""" Calculate and set ACL valid for requested collections.
DENY_ALL is added to ACL to make sure no access rules are
inherited.
"""
acl = [(Allow, 'g:admin', ALL_PERMISSIONS)]
collections = self.get_collections()
resources = self.get_resources(collections)
aces = self._get_least_permissions_aces(resources)
if aces is not None:
for ace in aces:
acl.append(ace)
acl.append(DENY_ALL)
self.__acl__ = tuple(acl) | 0.003597 |
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
# Traceback (most recent call last):
# File "/Users/dcramer/Development/django-sentry/sentry/client/handlers.py", line 31, in emit
# get_client().create_from_record(record, request=request)
# File "/Users/dcramer/Development/django-sentry/sentry/client/base.py", line 325, in create_from_record
# data['__sentry__']['frames'] = varmap(shorten, get_stack_info(stack))
# File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 112, in get_stack_info
# pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
# File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 24, in get_lines_from_file
# source = loader.get_source(module_name)
# File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 287, in get_source
# fullname = self._fix_name(fullname)
# File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 262, in _fix_name
# "module %s" % (self.fullname, fullname))
# ImportError: Loader for module cProfile cannot handle module __main__
source = None
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename)
try:
source = f.readlines()
finally:
f.close()
except (OSError, IOError):
pass
if source is None:
return []
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = _coding_re.search(line)
if match:
encoding = match.group(1)
break
source = [unicode(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = min(lineno + context_lines + 1, len(source))
try:
return [(lineno + 1, source[lineno].strip('\n')) for lineno in xrange(lower_bound, upper_bound)]
except IndexError:
# the file may have changed since it was loaded into memory
return [] | 0.004286 |
def tokens(self, sentence_dom):
'''
Tokenize all the words and preserve NER labels from ENAMEX tags
'''
## keep track of sentence position, which is reset for each
## sentence, and used above in _make_token
self.sent_pos = 0
## keep track of mention_id, so we can distinguish adjacent
## multi-token mentions within the same coref chain
mention_id = 0
while len(sentence_dom.childNodes) > 0:
## shrink the sentence_dom's child nodes. In v0_2_0 this
## was required to cope with HitMaxi16. Now it is just to
## save memory.
node = sentence_dom.childNodes.pop(0)
if node.nodeType == node.TEXT_NODE:
## process portion before an ENAMEX tag
for line in node.data.splitlines(True):
self._input_string = line
for start, end in self.word_tokenizer.span_tokenize(line):
tok = self._make_token(start, end)
if tok:
yield tok
if line.endswith('\n'):
## maintain the index to the current line
self.line_idx += 1
## increment index past the 'before' portion
self.byte_idx += len(line.encode('utf-8'))
else:
## process text inside an ENAMEX tag
assert node.nodeName == 'ENAMEX', node.nodeName
chain_id = node.attributes.get('ID').value
entity_type = node.attributes.get('TYPE').value
for node in node.childNodes:
assert node.nodeType == node.TEXT_NODE, node.nodeType
for line in node.data.splitlines(True):
self._input_string = line
for start, end in self.word_tokenizer.span_tokenize(line):
tok = self._make_token(start, end)
if tok:
if entity_type in _PRONOUNS:
tok.mention_type = MentionType.PRO
tok.entity_type = _ENTITY_TYPES[entity_type]
## create an attribute
attr = Attribute(
attribute_type=AttributeType.PER_GENDER,
value=str(_PRONOUNS[entity_type])
)
self.attributes.append(attr)
else:
## regular entity_type
tok.mention_type = MentionType.NAME
tok.entity_type = _ENTITY_TYPES[entity_type]
tok.equiv_id = int(chain_id)
tok.mention_id = mention_id
yield tok
if line.endswith('\n'):
## maintain the index to the current line
self.line_idx += 1
## increment index past the 'before' portion
self.byte_idx += len(line.encode('utf-8'))
## increment mention_id within this sentence
mention_id += 1 | 0.006851 |
def _get_spec(self, spec_file):
"""Get json specification from package data."""
spec_file_path = os.path.join(
pkg_resources.
resource_filename(
'reana_commons',
'openapi_specifications'),
spec_file)
with open(spec_file_path) as f:
json_spec = json.load(f)
return json_spec | 0.005181 |
def build_tree(self, *args, **kwargs):
"""Dispatch a tree build call. Note that you need at least four
taxa to express some evolutionary history on an unrooted tree."""
# Check length #
assert len(self) > 3
# Default option #
algorithm = kwargs.pop('algorithm', None)
if algorithm is None: algorithm = 'raxml'
# Dispatch #
if algorithm == 'raxml': return self.build_tree_raxml(*args, **kwargs)
if algorithm == 'fasttree': return self.build_tree_fast(*args, **kwargs) | 0.014706 |
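A hedged usage sketch for the dispatcher above; `alignment` is a hypothetical instance of the class this method belongs to, holding at least four sequences.

tree = alignment.build_tree()                      # defaults to the RAxML backend
tree = alignment.build_tree(algorithm='fasttree')  # explicitly pick FastTree instead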
def _initURL(self,
org_url,
referer_url):
""" sets proper URLs for AGOL """
if org_url is not None and org_url != '':
if not org_url.startswith('http://') and not org_url.startswith('https://'):
org_url = 'https://' + org_url
self._org_url = org_url
if self._org_url.lower().find('/sharing/rest') > -1:
self._url = self._org_url
else:
self._url = self._org_url + "/sharing/rest"
if self._url.startswith('http://'):
self._surl = self._url.replace('http://', 'https://')
else:
self._surl = self._url
parsed_url = urlparse(self._org_url)
self._parsed_org_url = urlunparse((parsed_url[0],parsed_url[1],"","","",""))
if referer_url is None:
parsed_org = urlparse(self._org_url)
self._referer_url = parsed_org.netloc
url = '{}/portals/self'.format( self._url)
parameters = {
'f': 'json'
}
portal_info = self._post(url=url,
param_dict=parameters,
securityHandler=self,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'user' in portal_info:
if 'username' in portal_info['user']:
self._username = portal_info['user']['username'] | 0.007947 |
def receivable_id(self, receivable_id):
"""
Sets the receivable_id of this AdditionalRecipientReceivableRefund.
The ID of the receivable that the refund was applied to.
:param receivable_id: The receivable_id of this AdditionalRecipientReceivableRefund.
:type: str
"""
if receivable_id is None:
raise ValueError("Invalid value for `receivable_id`, must not be `None`")
if len(receivable_id) < 1:
raise ValueError("Invalid value for `receivable_id`, length must be greater than or equal to `1`")
self._receivable_id = receivable_id | 0.007949 |
def __balance(self, account_id, **kwargs):
"""Call documentation: `/account/balance
<https://www.wepay.com/developer/reference/account-2011-01-15#balance>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. warning ::
This call is deprecated as of API version '2014-01-08'.
"""
params = {
'account_id': account_id
}
return self.make_call(self.__balance, params, kwargs) | 0.005252 |
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
"""
``collections.OrderedDict`` dumper.
"""
return {
"$" + class_name: [
(key, self._json_convert(value)) for key, value in iteritems(obj)
]
} | 0.010239 |
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_single_value(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
assert type(q) is float
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._build_mapreduce_func(quantile_builder, **kwargs)
result = self._full_axis_reduce(axis, func)
if axis == 0:
result.index = [q]
else:
result.columns = [q]
return result | 0.003282 |
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl) | 0.00333 |
def get_expr(self, ctx):
"""
Returns the MUF needed to get the contents of the lvalue.
Returned MUF will push the contained value onto the stack.
"""
varname = ctx.lookup_variable(self.varname)
if varname is None:
val = ctx.lookup_constant(self.varname)
if val:
try:
return val.generate_code(ctx)
except AttributeError:
return val
raise MuvError(
"Undeclared identifier '%s'." % self.varname,
position=self.position
)
if len(self.indexing) == 0:
return "{var} @".format(
var=varname,
)
if len(self.indexing) == 1:
return "{var} @ {idx} []".format(
var=varname,
idx=self.indexing[0],
)
return (
"{var} @ {{ {idx} }}list array_nested_get".format(
var=varname,
idx=" ".join(str(x) for x in self.indexing),
)
) | 0.001835 |
def send_at(self, value):
"""A unix timestamp specifying when your email should
be delivered.
:param value: A unix timestamp specifying when your email should
be delivered.
:type value: SendAt, int
"""
if isinstance(value, SendAt):
if value.personalization is not None:
try:
personalization = \
self._personalizations[value.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
personalization.send_at = value.send_at
if not has_internal_personalization:
self.add_personalization(
personalization, index=value.personalization)
else:
self._send_at = value
else:
self._send_at = SendAt(value) | 0.001978 |
def highlight_multi_regex(str_, pat_to_color, reflags=0):
"""
FIXME: use pygments instead. The patterns must be mutually exclusive.
"""
#import colorama
# from colorama import Fore, Style
#color = Fore.MAGENTA
# color = Fore.RED
#match = re.search(pat, str_, flags=reflags)
colored = str_
to_replace = []
for pat, color in pat_to_color.items():
matches = list(re.finditer(pat, str_, flags=reflags))
for match in matches:
start = match.start()
end = match.end()
to_replace.append((end, start, color))
for tup in reversed(sorted(to_replace)):
end, start, color = tup
colored_part = color_text(colored[start:end], color)
colored = colored[:start] + colored_part + colored[end:]
return colored | 0.00495 |
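A small usage sketch, assuming `color_text` accepts the color names used below; the patterns and colors are illustrative only.

pat_to_color = {
    r'ERROR':              'red',     # highlight the severity keyword
    r'\d{4}-\d{2}-\d{2}':  'yellow',  # highlight ISO dates
}
line = '2024-01-01 ERROR disk quota exceeded'
print(highlight_multi_regex(line, pat_to_color))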
def text_to_qcolor(text):
"""
Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated
"""
color = QColor()
if not is_string(text): # testing for QString (PyQt API#1)
text = str(text)
if not is_text_string(text):
return color
if text.startswith('#') and len(text)==7:
correct = '#0123456789abcdef'
for char in text:
if char.lower() not in correct:
return color
elif text not in list(QColor.colorNames()):
return color
color.setNamedColor(text)
return color | 0.004918 |
def rock(self):
"""Starts and does the parsing."""
if not self.argv:
self.arg.view()
while(self.argv):
arg = self.argv.popleft()
if arg == "-h" or arg == "--help":
print(
"""Usage: td [-h (--help)] [-v (--version)] [command]"""
""", where [command] is one of:\n\n"""
"""v (view)\tChanges the way next output"""
""" will look like. See [td v -h].\n"""
"""m (modify)\tApplies one time changes to"""
""" the database. See [td m -h].\n"""
"""o (options)\tSets persistent options, applied"""
""" on every next execution. See [td o -h].\n"""
"""a (add)\t\tAdds new item. See [td a -h].\n"""
"""e (edit)\tEdits existing item. See [td e -h].\n"""
"""r (rm)\t\tRemoves existing item. See [td r -h].\n"""
"""d (done)\tMarks items as done. See [td d -h].\n"""
"""D (undone)\tMarks items as not done. See [td D -h].\n"""
"""\nAdditional options:\n"""
""" -h (--help)\tShows this screen.\n"""
""" -v (--version)Shows version number."""
)
elif arg == "-v" or arg == "--version":
print("td :: {}".format(__version__))
elif arg == "v" or arg == "view":
self._part("view", self.arg.view, {
"--no-color": ("nocolor", False),
"-s": ("sort", True), "--sort": ("sort", True),
"-p": ("purge", False), "--purge": ("purge", False),
"-d": ("done", True), "--done": ("done", True),
"-D": ("undone", True), "--undone": ("undone", True)
},
"""Usage: td v [-h (--help)] [command(s)]"""
""", where [command(s)] are any of:\n\n"""
"""-s (--sort) <pattern>\tSorts the output using"""
""" <pattern>.\n"""
"""-p (--purge)\t\tHides items marked as done.\n"""
"""-d (--done) <pattern>\tDisplays items matching"""
""" <pattern> as done.\n"""
"""-D (--undone) <pattern>\tDisplays items matching"""
""" <pattern> as not done.\n"""
"""--no-color\t\tDo not add color codes to the output.\n"""
"""\nAdditional options:\n"""
""" -h (--help)\t\tShows this screen."""
)
elif arg == "m" or arg == "modify":
self._part("modify", self.arg.modify, {
"-s": ("sort", True), "--sort": ("sort", True),
"-p": ("purge", False), "--purge": ("purge", False),
"-d": ("done", True), "--done": ("done", True),
"-D": ("undone", True), "--undone": ("undone", True)
},
"""Usage: td m [-h (--help)] [command(s)]"""
""", where [command(s)] are any of:\n\n"""
"""-s (--sort) <pattern>\tSorts database using"""
""" <pattern>.\n"""
"""-p (--purge)\t\tRemoves items marked as done.\n"""
"""-d (--done) <pattern>\tMarks items matching"""
""" <pattern> as done.\n"""
"""-D (--undone) <pattern>\tMarks items matching"""
""" <pattern> as not done.\n"""
"""\nAdditional options:\n"""
""" -h (--help)\t\tShows this screen."""
)
elif arg == "a" or arg == "add":
args = dict()
if self.argv and self.arg.model.exists(self.argv[0]):
args["parent"] = self.argv.popleft()
self._part("add", self.arg.add, {
"-n": ("name", True), "--name": ("name", True),
"-p": ("priority", True), "--priority": ("priority", True),
"-c": ("comment", True), "--comment": ("comment", True)
},
"""Usage: td a [-h (--help)] [parent] [command(s)]"""
""", where [command(s)] are any of:\n\n"""
"""-n (--name) <text>\t\tSets item's name.\n"""
"""-p (--priority) <no|name>\tSets item's priority.\n"""
"""-c (--comment) <text>\t\tSets item's comment.\n"""
"""\nIf [parent] index is specified, new item will"""
""" become it's child.\n"""
"""If any of the arguments is omitted,"""
""" this command will launch an interactive session"""
""" letting the user supply the rest of them.\n"""
"""\nAdditional options:\n"""
""" -h (--help)\t\t\tShows this screen.""",
**args
)
elif arg == "e" or arg == "edit":
if not self.argv:
raise NotEnoughArgumentsError("edit")
args = dict()
if self.argv[0] not in ["-h", "--help"]:
args["index"] = self.argv.popleft()
self._part("edit", self.arg.edit, {
"--parent": ("parent", True),
"-n": ("name", True), "--name": ("name", True),
"-p": ("priority", True), "--priority": ("priority", True),
"-c": ("comment", True), "--comment": ("comment", True)
},
"""Usage: td e [-h (--help)] <index> [command(s)]"""
""", where [command(s)] are any of:\n\n"""
"""--parent <index>\t\tChanges item's parent.\n"""
"""-n (--name) <text>\t\tChanges item's name.\n"""
"""-p (--priority) <no|name>\tChanges item's priority.\n"""
"""-c (--comment) <text>\t\tChanges item's comment.\n"""
"""\nIndex argument is required and has to point at"""
""" an existing item.\n"""
"""If any of the arguments is omitted, it will launch"""
""" an interactive session letting the user supply the"""
""" rest of them.\n"""
"""\nAdditions options:\n"""
""" -h (--help)\t\t\tShows this screen.""",
**args
)
elif arg == "r" or arg == "rm":
args = dict()
if not self.argv:
raise NotEnoughArgumentsError("rm")
elif self.argv[0] not in ["-h", "--help"]:
args["index"] = self.argv.popleft()
self._part("rm", self.arg.rm, {
},
"""Usage: td r [-h (--help)] <index>\n\n"""
"""Index argument is required and has to point at"""
""" an existing item.\n"""
"""\nAdditions options:\n"""
""" -h (--help)\tShows this screen.""",
**args
)
elif arg == "d" or arg == "done":
args = dict()
if not self.argv:
raise NotEnoughArgumentsError("done")
elif self.argv[0] not in ["-h", "--help"]:
args["index"] = self.argv.popleft()
self._part("done", self.arg.done, {
},
"""Usage: td d [-h (--help)] <index>\n\n"""
"""Index argument is required and has to point at"""
""" an existing item.\n"""
"""\nAdditional options:\n"""
""" -h (--help)\tShows this screen.""",
**args
)
elif arg == "D" or arg == "undone":
args = dict()
if not self.argv:
raise NotEnoughArgumentsError("undone")
elif self.argv[0] not in ["-h", "--help"]:
args["index"] = self.argv.popleft()
self._part("undone", self.arg.undone, {
},
"""Usage: td D [-h (--help)] <index>\n\n"""
"""Index argument is required and has to point at"""
""" an existing item.\n"""
"""\nAdditional options:\n"""
""" -h (--help)\tShows this screen.""",
**args
)
elif arg == "o" or arg == "options":
self._part("options", self.arg.options, {
"-g": ("glob", False), "--global": ("glob", False),
"-s": ("sort", True), "--sort": ("sort", True),
"-p": ("purge", False), "--purge": ("purge", False),
"-d": ("done", True), "--done": ("done", True),
"-D": ("undone", True), "--undone": ("undone", True)
},
"""Usage: td o [-h (--help)] [command(s)]"""
""", where [command(s)] are any of:\n\n"""
"""-g (--global)\t\tApply specified options to all"""
""" ToDo lists (store in ~/.tdrc).\n"""
"""-s (--sort) <pattern>\tAlways sorts using"""
""" <pattern>.\n"""
"""-p (--purge)\t\tAlways removes items marked"""
"""as done.\n"""
"""-d (--done) <pattern>\tAlways marks items maching"""
""" <pattern> as done.\n"""
"""-D (--undone) <pattern>\tAlways marks items maching"""
""" <pattern> as not done.\n"""
"""\nAdditional options:\n"""
""" -h (--help)\t\tShows this screen."""
)
else:
raise UnrecognizedCommandError("td", arg) | 0.000197 |
def _GetTfRecordEntries(self, path, max_entries, is_sequence,
iterator_options):
"""Extracts TFRecord examples into a dictionary of feature values.
Args:
path: The path to the TFRecord file(s).
max_entries: The maximum number of examples to load.
is_sequence: True if the input data from 'path' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
return self._GetEntries([path], max_entries,
partial(
tf.python_io.tf_record_iterator,
options=iterator_options), is_sequence) | 0.003155 |
def reloader_thread(softexit=False):
"""If ``soft_exit`` is True, we use sys.exit(); otherwise ``os_exit``
will be used to end the process.
"""
while RUN_RELOADER:
if code_changed():
# force reload
if softexit:
sys.exit(3)
else:
os._exit(3)
time.sleep(1) | 0.002825 |
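A minimal sketch of running the reloader in the background, assuming RUN_RELOADER and code_changed() are defined in the same module as above.

import threading

watcher = threading.Thread(target=reloader_thread, kwargs={'softexit': False})
watcher.daemon = True   # do not block interpreter shutdown
watcher.start()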
def export(self, top=True):
"""Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with `top`=True;
all list objects that are embedded as fields inside list objects
should be exported with `top`=False
Returns:
str: The objects string representation
"""
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.number_of_records_per_hour))
out.append(self._to_str(self.data_period_name_or_description))
out.append(self._to_str(self.data_period_start_day_of_week))
out.append(self._to_str(self.data_period_start_day))
out.append(self._to_str(self.data_period_end_day))
return ",".join(out) | 0.00224 |
def GetGroups(location=None,alias=None):
"""Return all of alias' groups in the given location.
http://www.centurylinkcloud.com/api-docs/v2/#groups-get-group
:param alias: short code for a particular account. If none will use account's default alias
:param location: datacenter where group resides
"""
if alias is None: alias = clc.v1.Account.GetAlias()
if location is None: location = clc.v1.Account.GetLocation()
r = clc.v1.API.Call('post','Group/GetGroups',{'AccountAlias': alias, 'Location': location})
for group in r['HardwareGroups']: clc._GROUP_MAPPING[group['UUID']] = group['Name']
if int(r['StatusCode']) == 0: return(r['HardwareGroups']) | 0.034125 |
def PrettyPrinter(obj):
"""Pretty printers for AppEngine objects."""
if ndb and isinstance(obj, ndb.Model):
return six.iteritems(obj.to_dict()), 'ndb.Model(%s)' % type(obj).__name__
if messages and isinstance(obj, messages.Enum):
return [('name', obj.name), ('number', obj.number)], type(obj).__name__
return None | 0.01506 |
def get_tower_results(iterator, optimizer, dropout_rates):
r'''
With this preliminary step out of the way, we can, for each GPU, introduce a
tower for whose batch we calculate and return the optimization gradients
and the average loss across towers.
'''
# To calculate the mean of the losses
tower_avg_losses = []
# Tower gradients to return
tower_gradients = []
with tf.variable_scope(tf.get_variable_scope()):
# Loop over available_devices
for i in range(len(Config.available_devices)):
# Execute operations of tower i on device i
device = Config.available_devices[i]
with tf.device(device):
# Create a scope for all operations of tower i
with tf.name_scope('tower_%d' % i):
# Calculate the avg_loss and mean_edit_distance and retrieve the decoded
# batch along with the original batch's labels (Y) of this tower
avg_loss = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0)
# Allow for variables to be re-used by the next tower
tf.get_variable_scope().reuse_variables()
# Retain tower's avg losses
tower_avg_losses.append(avg_loss)
# Compute gradients for model parameters using tower's mini-batch
gradients = optimizer.compute_gradients(avg_loss)
# Retain tower's gradients
tower_gradients.append(gradients)
avg_loss_across_towers = tf.reduce_mean(tower_avg_losses, 0)
tf.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries'])
# Return gradients and the average loss
return tower_gradients, avg_loss_across_towers | 0.003778 |
def method2jpg(output, mx, raw=False):
"""
Export method to a jpg file format
:param output: output filename
:type output: string
:param mx: specify the MethodAnalysis object
:type mx: :class:`MethodAnalysis` object
:param raw: use directly a dot raw buffer (optional)
:type raw: string
"""
buff = raw
if not raw:
buff = method2dot(mx)
method2format(output, "jpg", mx, buff) | 0.002315 |
def normrelpath(base, target):
"""
This function takes the base and target arguments as paths, and
returns an equivalent relative path from base to the target, if both
provided paths are absolute.
"""
if not all(map(isabs, [base, target])):
return target
return relpath(normpath(target), dirname(normpath(base))) | 0.002857 |
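A worked example, assuming POSIX-style paths; the result is expressed relative to the directory containing `base`.

normrelpath('/srv/app/conf/build.json', '/srv/app/src/main.js')  # -> '../src/main.js'
# If either argument is not absolute, the target is returned unchanged.
normrelpath('conf/build.json', '/srv/app/src/main.js')           # -> '/srv/app/src/main.js'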
def from_file(cls, file_path: Path, w3: Web3) -> "Package":
"""
Returns a ``Package`` instantiated by a manifest located at the provided Path.
``file_path`` arg must be a ``pathlib.Path`` instance.
A valid ``Web3`` instance is required to instantiate a ``Package``.
"""
if isinstance(file_path, Path):
raw_manifest = file_path.read_text()
validate_raw_manifest_format(raw_manifest)
manifest = json.loads(raw_manifest)
else:
raise TypeError(
"The Package.from_file method expects a pathlib.Path instance."
f"Got {type(file_path)} instead."
)
return cls(manifest, w3, file_path.as_uri()) | 0.004049 |
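A hedged usage sketch; the manifest path is hypothetical and any connected Web3 instance will do.

from pathlib import Path
from web3 import Web3

w3 = Web3(Web3.EthereumTesterProvider())
pkg = Package.from_file(Path('manifests/owned-1.0.0.json'), w3)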
def rot_points(pnts, pb, alfa):
"""Rotate a list of points by an angle alfa (radians) around pivotal point pb.
Intended for modifying the control points of trigonometric functions.
"""
points = [] # rotated points
calfa = math.cos(alfa)
salfa = math.sin(alfa)
for p in pnts:
s = p - pb
r = abs(s)
if r > 0: s /= r
np = (s.x * calfa - s.y * salfa, s.y * calfa + s.x * salfa)
points.append(pb + fitz.Point(np)*r)
return points | 0.005837 |
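A worked example, assuming PyMuPDF's fitz.Point supports the arithmetic used above (it does in recent releases); here the control points are rotated by 90 degrees about the origin.

import math
import fitz  # PyMuPDF

pts = [fitz.Point(1, 0), fitz.Point(0, 1)]
pivot = fitz.Point(0, 0)
rotated = rot_points(pts, pivot, math.pi / 2)
# rotated[0] is approximately Point(0.0, 1.0); rotated[1] approximately Point(-1.0, 0.0)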
def open(self, baudrate=None, no_reader_thread=False):
"""
Opens the device.
:param baudrate: baudrate to use
:type baudrate: int
:param no_reader_thread: whether or not to automatically open the reader
thread.
:type no_reader_thread: bool
:raises: :py:class:`~alarmdecoder.util.NoDeviceError`, :py:class:`~alarmdecoder.util.CommError`
"""
try:
self._read_thread = Device.ReadThread(self)
self._device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._use_ssl:
self._init_ssl()
self._device.connect((self._host, self._port))
if self._use_ssl:
while True:
try:
self._device.do_handshake()
break
except SSL.WantReadError:
pass
self._id = '{0}:{1}'.format(self._host, self._port)
except socket.error as err:
raise NoDeviceError('Error opening device at {0}:{1}'.format(self._host, self._port), err)
else:
self._running = True
self.on_open()
if not no_reader_thread:
self._read_thread.start()
return self | 0.003754 |
def routing_area_2_json(self):
"""
transform ariane_clip3 routing area object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("RoutingArea.routing_area_2_json")
json_obj = {
'routingAreaID': self.id,
'routingAreaName': self.name,
'routingAreaDescription': self.description,
'routingAreaType': self.type,
'routingAreaMulticast': self.multicast,
'routingAreaLocationsID': self.loc_ids,
'routingAreaSubnetsID': self.subnet_ids
}
return json.dumps(json_obj) | 0.003221 |
def hash_data(data, hasher=NoParam, base=NoParam, types=False,
hashlen=NoParam, convert=False):
"""
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=False):
if True, try to convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hash_data with hasher='sha1' will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
"""
if convert and isinstance(data, six.string_types): # nocover
try:
data = json.dumps(data)
except TypeError as ex:
# import warnings
# warnings.warn('Unable to encode input as json due to: {!r}'.format(ex))
pass
base = _rectify_base(base)
hashlen = _rectify_hashlen(hashlen)
hasher = _rectify_hasher(hasher)()
# Feed the data into the hasher
_update_hasher(hasher, data, types=types)
# Get the hashed representation
text = _digest_hasher(hasher, hashlen, base)
return text | 0.001339 |
def resetEditorValue(self, checked=False):
""" Resets the editor to the default value. Also resets the children.
"""
# Block all signals to prevent duplicate inspector updates.
# No need to restore, the editors will be deleted after the reset.
for subEditor in self._subEditors:
subEditor.blockSignals(True)
self.cti.resetToDefault(resetChildren=True)
# This will commit the children as well.
self.setData(self.cti.defaultData)
self.commitAndClose() | 0.003745 |
def apply(self, search, field, value):
"""Apply lookup expression to search query."""
# We assume that the field in question has a "raw" counterpart.
return search.query('match', **{'{}.raw'.format(field): value}) | 0.008439 |
def chunk_X(
self,
select: Union[int, List[int], Tuple[int, ...], np.ndarray] = 1000,
replace: bool = True,
):
"""Return a chunk of the data matrix :attr:`X` with random or specified indices.
Parameters
----------
select
If select is an integer, a random chunk of row size = select will be returned.
If select is a list, tuple or numpy array of integers, then a chunk
with these indices will be returned.
replace
If select is an integer then ``replace=True`` specifies random sampling of indices
with replacement, ``replace=False`` - without replacement.
"""
if isinstance(select, int):
select = select if select < self.n_obs else self.n_obs
choice = np.random.choice(self.n_obs, select, replace)
elif isinstance(select, (np.ndarray, list, tuple)):
choice = np.asarray(select)
else:
raise ValueError('select should be int or array')
reverse = None
if self.isbacked:
# h5py can only slice with a sorted list of unique index values
# so random batch with indices [2, 2, 5, 3, 8, 10, 8] will fail
# this fixes the problem
indices, reverse = np.unique(choice, return_inverse=True)
selection = self.X[indices.tolist()]
else:
selection = self.X[choice]
selection = selection.toarray() if issparse(selection) else selection
return selection if reverse is None else selection[reverse] | 0.00375 |
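A hedged usage sketch; `adata` is a hypothetical AnnData-like object exposing this method.

chunk = adata.chunk_X(select=500)           # random chunk of 500 rows, sampled with replacement
rows = adata.chunk_X(select=[0, 5, 5, 42])  # exactly these rows, duplicates and order preserved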
def load_py_instance(self, is_spout):
"""Loads user defined component (spout/bolt)"""
try:
if is_spout:
spout_proto = self.pplan_helper.get_my_spout()
py_classpath = spout_proto.comp.class_name
self.logger.info("Loading Spout from: %s", py_classpath)
else:
bolt_proto = self.pplan_helper.get_my_bolt()
py_classpath = bolt_proto.comp.class_name
self.logger.info("Loading Bolt from: %s", py_classpath)
pex_loader.load_pex(self.pplan_helper.topology_pex_abs_path)
spbl_class = pex_loader.import_and_get_class(self.pplan_helper.topology_pex_abs_path,
py_classpath)
except Exception as e:
spbl = "spout" if is_spout else "bolt"
self.logger.error(traceback.format_exc())
raise RuntimeError("Error when loading a %s from pex: %s" % (spbl, str(e)))
return spbl_class | 0.010917 |
def txn_useNonce(self, server_url, timestamp, salt):
"""Return whether this nonce is present, and if it is, then
remove it from the set.
str -> bool"""
if abs(timestamp - time.time()) > nonce.SKEW:
return False
try:
self.db_add_nonce(server_url, timestamp, salt)
except self.exceptions.IntegrityError:
# The key uniqueness check failed
return False
else:
# The nonce was successfully added
return True | 0.003766 |
def save_issue_data_task(self, issue, task_id, namespace='open'):
"""Saves a issue data (tasks, etc.) to local data.
Args:
issue:
`int`. Github issue number.
task:
`int`. Asana task ID.
namespace:
`str`. Namespace for storing this issue.
"""
issue_data = self.get_saved_issue_data(issue, namespace)
if not issue_data.has_key('tasks'):
issue_data['tasks'] = [task_id]
elif task_id not in issue_data['tasks']:
issue_data['tasks'].append(task_id) | 0.004983 |
def help_center_user_segment_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/user_segments#delete-user-segment"
api_path = "/api/v2/help_center/user_segments/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) | 0.009231 |
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[self.url_field_name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
) | 0.004866 |
def fork_detached_process ():
"""Fork this process, creating a subprocess detached from the current context.
Returns a :class:`pwkit.Holder` instance with information about what
happened. Its fields are:
whoami
A string, either "original" or "forked" depending on which process we are.
pipe
An open binary file descriptor. It is readable by the original process
and writable by the forked one. This can be used to pass information
from the forked process to the one that launched it.
forkedpid
The PID of the forked process. Note that this process is *not* a child of
the original one, so waitpid() and friends may not be used on it.
Example::
from pwkit import cli
info = cli.fork_detached_process ()
if info.whoami == 'original':
message = info.pipe.readline ().decode ('utf-8')
if not len (message):
cli.die ('forked process (PID %d) appears to have died', info.forkedpid)
info.pipe.close ()
print ('forked process said:', message)
else:
info.pipe.write ('hello world'.encode ('utf-8'))
info.pipe.close ()
As always, the *vital* thing to understand is that immediately after a
call to this function, you have **two** nearly-identical but **entirely
independent** programs that are now both running simultaneously. Until you
execute some kind of ``if`` statement, the only difference between the two
processes is the value of the ``info.whoami`` field and whether
``info.pipe`` is readable or writeable.
This function uses :func:`os.fork` twice and also calls :func:`os.setsid`
in between the two invocations, which creates new session and process
groups for the forked subprocess. It does *not* perform other operations
that you might want, such as changing the current directory, dropping
privileges, closing file descriptors, and so on. For more discussion of
best practices when it comes to “daemonizing” processes, see (stalled)
`PEP 3143`_.
.. _PEP 3143: https://www.python.org/dev/peps/pep-3143/
"""
import os, struct
from .. import Holder
payload = struct.Struct ('L')
info = Holder ()
readfd, writefd = os.pipe ()
pid1 = os.fork ()
if pid1 > 0:
info.whoami = 'original'
info.pipe = os.fdopen (readfd, 'rb')
os.close (writefd)
retcode = os.waitpid (pid1, 0)[1]
if retcode:
raise Exception ('child process exited with error code %d' % retcode)
(info.forkedpid,) = payload.unpack (info.pipe.read (payload.size))
else:
# We're the intermediate child process. Start new session and process
# groups, detaching us from TTY signals and whatnot.
os.setsid ()
pid2 = os.fork ()
if pid2 > 0:
# We're the intermediate process; we're all done
os._exit (0)
# If we get here, we're the detached child process.
info.whoami = 'forked'
info.pipe = os.fdopen (writefd, 'wb')
os.close (readfd)
info.forkedpid = os.getpid ()
info.pipe.write (payload.pack (info.forkedpid))
return info | 0.007762 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'recognitions') and self.recognitions is not None:
_dict['recognitions'] = [x._to_dict() for x in self.recognitions]
return _dict | 0.00722 |
def like_units(a, b):
'''
like_units(a,b) yields True if a and b can be cast to each other in terms of units and False
otherwise. Non-united units are considered dimensionless units.
'''
a = quant(0.0, a) if is_unit(a) else a if is_quantity(a) else quant(a, units.dimensionless)
b = quant(0.0, b) if is_unit(b) else b if is_quantity(b) else quant(b, units.dimensionless)
if a == b: return True
try:
c = a.to(b.u)
return True
except:
return False | 0.011834 |
def bounding_box(self):
"""Bounding box (`~regions.BoundingBox`)."""
xmin = self.center.x - self.radius
xmax = self.center.x + self.radius
ymin = self.center.y - self.radius
ymax = self.center.y + self.radius
return BoundingBox.from_float(xmin, xmax, ymin, ymax) | 0.006431 |
def get_portchannel_info_by_intf_output_lacp_system_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
system_priority = ET.SubElement(lacp, "system-priority")
system_priority.text = kwargs.pop('system_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004847 |
def setdict(self, D):
"""Set dictionary array."""
self.D = np.asarray(D, dtype=self.dtype)
# Factorise dictionary for efficient solves
self.lu, self.piv = sl.cho_factor(self.D, 1.0)
self.lu = np.asarray(self.lu, dtype=self.dtype) | 0.007407 |
def get_dimension_by_unit_measure_or_abbreviation(measure_or_unit_abbreviation,**kwargs):
"""
Return the physical dimension a given unit abbreviation of a measure, or the measure itself, refers to.
The search key is the abbreviation or the full measure
"""
unit_abbreviation, factor = _parse_unit(measure_or_unit_abbreviation)
units = db.DBSession.query(Unit).filter(Unit.abbreviation==unit_abbreviation).all()
if len(units) == 0:
raise HydraError('Unit %s not found.'%(unit_abbreviation))
elif len(units) > 1:
raise HydraError('Unit %s is ambiguous: multiple matching units found.'%(unit_abbreviation))
else:
dimension = db.DBSession.query(Dimension).filter(Dimension.id==units[0].dimension_id).one()
return str(dimension.name) | 0.013802 |
def is_block(bin_list):
"""Check if a bin list has exclusively consecutive bin ids.
"""
id_set = set((my_bin[1] for my_bin in bin_list))
start_id, end_id = min(id_set), max(id_set)
return id_set == set(range(start_id, end_id + 1)) | 0.003984 |
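A worked example; only index 1 of each bin entry is inspected, so the rest of the tuple layout shown here is illustrative.

is_block([('chr1', 3), ('chr1', 4), ('chr1', 5)])  # True: ids 3..5 are consecutive
is_block([('chr1', 3), ('chr1', 5)])               # False: id 4 is missing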
def readTEXTRECORD(self, glyphBits, advanceBits, previousRecord=None, level=1):
""" Read a SWFTextRecord """
if self.readUI8() == 0:
return None
else:
self.seek(self.tell() - 1)
return SWFTextRecord(self, glyphBits, advanceBits, previousRecord, level) | 0.009646 |
def solve(self, linear_system,
vector_factory=None,
*args, **kwargs):
'''Solve the given linear system with recycling.
The provided `vector_factory` determines which vectors are used for
deflation.
:param linear_system: the :py:class:`~krypy.linsys.LinearSystem` that
is about to be solved.
:param vector_factory: (optional) see description in constructor.
All remaining arguments are passed to the ``DeflatedSolver``.
:returns: instance of ``DeflatedSolver`` which was used to obtain the
approximate solution. The approximate solution is available under the
attribute ``xk``.
'''
# replace linear_system with equivalent TimedLinearSystem on demand
if not isinstance(linear_system, linsys.TimedLinearSystem):
linear_system = linsys.ConvertedTimedLinearSystem(linear_system)
with self.timings['vector_factory']:
if vector_factory is None:
vector_factory = self._vector_factory
# construct vector_factory if strings are provided
if vector_factory == 'RitzApproxKrylov':
vector_factory = factories.RitzFactory(
subset_evaluator=evaluators.RitzApproxKrylov()
)
elif vector_factory == 'RitzAprioriCg':
vector_factory = factories.RitzFactory(
subset_evaluator=evaluators.RitzApriori(
Bound=utils.BoundCG
)
)
elif vector_factory == 'RitzAprioriMinres':
vector_factory = factories.RitzFactory(
subset_evaluator=evaluators.RitzApriori(
Bound=utils.BoundMinres
)
)
# get deflation vectors
if self.last_solver is None or vector_factory is None:
U = numpy.zeros((linear_system.N, 0))
else:
U = vector_factory.get(self.last_solver)
with self.timings['solve']:
# solve deflated linear system
self.last_solver = self._DeflatedSolver(linear_system,
U=U,
store_arnoldi=True,
*args, **kwargs)
# return solver instance
return self.last_solver | 0.001598 |
def replycomposite(self, rtypes, nodes):
"""
Construct a I{composite} reply. This method is called when it has been
detected that the reply has multiple root nodes.
@param rtypes: A list of known return I{types}.
@type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: The I{unmarshalled} composite object.
@rtype: L{Object},...
"""
dictionary = {}
for rt in rtypes:
dictionary[rt.name] = rt
unmarshaller = self.unmarshaller()
composite = Factory.object('reply')
for node in nodes:
tag = node.name
rt = dictionary.get(tag, None)
if rt is None:
if node.get('id') is None:
raise Exception('<%s/> not mapped to message part' % tag)
else:
continue
resolved = rt.resolve(nobuiltin=True)
sobject = unmarshaller.process(node, resolved)
value = getattr(composite, tag, None)
if value is None:
if rt.unbounded():
value = []
setattr(composite, tag, value)
value.append(sobject)
else:
setattr(composite, tag, sobject)
else:
if not isinstance(value, list):
value = [value, ]
setattr(composite, tag, value)
value.append(sobject)
return composite | 0.001249 |
def get_storage_controller_by_name(self, name):
"""Returns a storage controller with the given name.
in name of type str
return storage_controller of type :class:`IStorageController`
raises :class:`VBoxErrorObjectNotFound`
A storage controller with given name doesn't exist.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
storage_controller = self._call("getStorageControllerByName",
in_p=[name])
storage_controller = IStorageController(storage_controller)
return storage_controller | 0.005952 |
def response_cookies(self):
"""
This will return all cookies set
:return: dict {name, value}
"""
try:
ret = {}
for cookie_base_uris in self.response.cookies._cookies.values():
for cookies in cookie_base_uris.values():
for cookie in cookies.keys():
ret[cookie] = cookies[cookie].value
return ret
except Exception as e:
self.error = ApiError(
"Exception in making Request with:: %s\n%s" % (
e_, traceback.format_exc()))
raise Exception(self.error) | 0.003082 |
def remove_subkey(self, subkey):
"""
Remove the given subkey, if existed, from this AdfKey.
Parameters
----------
subkey : str or AdfKey
The subkey to remove.
"""
if len(self.subkeys) > 0:
key = subkey if isinstance(subkey, str) else subkey.key
for i in range(len(self.subkeys)):
if self.subkeys[i].key == key:
self.subkeys.pop(i)
break | 0.004115 |
def human_to_bytes(size):
'''
Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0
'''
sbytes = size[:-1]
unit = size[-1]
if sbytes.isdigit():
sbytes = int(sbytes)
if unit == 'P':
sbytes *= 1125899906842624
elif unit == 'T':
sbytes *= 1099511627776
elif unit == 'G':
sbytes *= 1073741824
elif unit == 'M':
sbytes *= 1048576
else:
sbytes = 0
else:
sbytes = 0
return sbytes | 0.00157 |
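A few worked values for the helper above; note that only P, T, G and M suffixes are recognised and anything else yields 0.

human_to_bytes('2G')   # 2147483648
human_to_bytes('30M')  # 31457280
human_to_bytes('10T')  # 10995116277760
human_to_bytes('512')  # 0 -- no recognised unit suffix
human_to_bytes('5K')   # 0 -- kilobyte suffix is not handled here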
def get_threats_lists(self):
"""Retrieve all available threat lists"""
response = self.service.threatLists().list().execute()
self.set_wait_duration(response.get('minimumWaitDuration'))
return response['threatLists'] | 0.008065 |
def unregister(self, fd):
"""
Unregister an USB-unrelated fd from poller.
Convenience method.
"""
if fd in self.__fd_set:
raise ValueError(
'This fd is a special USB event fd, it must stay registered.'
)
self.__poller.unregister(fd) | 0.00625 |
def getResponse(neighbors, weights=None):
"""
Calculated weighted response based on a list of nearest neighbors
:param neighbors: a list of neighbors, each entry is a data instance
:param weights: a numpy array of the same length as the neighbors
:return: weightedAvg: weighted average response
"""
neighborResponse = []
for x in range(len(neighbors)):
neighborResponse.append(neighbors[x][-1])
neighborResponse = np.array(neighborResponse).astype('float')
if weights is None:
weightedAvg = np.mean(neighborResponse)
else:
weightedAvg = np.sum(weights * neighborResponse)
return weightedAvg | 0.012739 |
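A worked example; each neighbor is a feature vector whose last entry is the response value, and explicit weights should sum to 1.

import numpy as np

neighbors = [[0.1, 0.2, 10.0],
             [0.3, 0.1, 20.0]]
getResponse(neighbors)                           # 15.0  (plain mean of 10 and 20)
getResponse(neighbors, np.array([0.25, 0.75]))   # 17.5  (weighted average)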
def get_self(self):
"""GetSelf.
Read identity of the home tenant request user.
:rtype: :class:`<IdentitySelf> <azure.devops.v5_0.identity.models.IdentitySelf>`
"""
response = self._send(http_method='GET',
location_id='4bb02b5b-c120-4be2-b68e-21f7c50a4b82',
version='5.0')
return self._deserialize('IdentitySelf', response) | 0.009302 |
def _get_rate(self, mag):
"""
Calculate and return the annual occurrence rate for a specific bin.
:param mag:
Magnitude value corresponding to the center of the bin of interest.
:returns:
Float number, the annual occurrence rate for the :param mag value.
"""
mag_lo = mag - self.bin_width / 2.0
mag_hi = mag + self.bin_width / 2.0
if mag >= self.min_mag and mag < self.char_mag - DELTA_CHAR / 2:
# return rate according to exponential distribution
return (10 ** (self.a_val - self.b_val * mag_lo)
- 10 ** (self.a_val - self.b_val * mag_hi))
else:
# return characteristic rate (distributed over the characteristic
# range) for the given bin width
return (self.char_rate / DELTA_CHAR) * self.bin_width | 0.002281 |
def start_all_linking(self, mode, group):
"""Put the IM into All-Linking mode.
Puts the IM into All-Linking mode for 4 minutes.
Parameters:
mode: 0 | 1 | 3 | 255
0 - PLM is responder
1 - PLM is controller
3 - Device that initiated All-Linking is Controller
255 = Delete All-Link
group: All-Link group number (0 - 255)
"""
msg = StartAllLinking(mode, group)
self.send_msg(msg) | 0.003846 |
def clear_cache_fields(self):
'''Set cache fields to ``None``. Check :attr:`Field.as_cache`
for information regarding fields which are considered cache.'''
for field in self._meta.scalarfields:
if field.as_cache:
setattr(self, field.name, None) | 0.006944 |
def speriodogram(x, NFFT=None, detrend=True, sampling=1.,
scale_by_freq=True, window='hamming', axis=0):
"""Simple periodogram, but matrices accepted.
:param x: an array or matrix of data samples.
:param NFFT: length of the data before FFT is computed (zero padding)
:param bool detrend: detrend the data before computing the FFT
:param float sampling: sampling frequency of the input :attr:`data`.
:param scale_by_freq:
:param str window:
:return: 2-sided PSD if complex data, 1-sided if real.
if a matrix is provided (using numpy.matrix), then a periodogram
is computed for each row. The returned matrix has the same shape as the input
matrix.
The mean of the input data is also removed from the data before computing
the psd.
.. plot::
:width: 80%
:include-source:
from pylab import grid, semilogy
from spectrum import data_cosine, speriodogram
data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)
semilogy(speriodogram(data, detrend=False, sampling=1024), marker='o')
grid(True)
.. plot::
:width: 80%
:include-source:
import numpy
from spectrum import speriodogram, data_cosine
from pylab import figure, semilogy, figure ,imshow
# create N data sets and make the frequency dependent on the time
N = 100
m = numpy.concatenate([data_cosine(N=1024, A=0.1, sampling=1024, freq=x)
for x in range(1, N)]);
m.resize(N, 1024)
res = speriodogram(m)
figure(1)
semilogy(res)
figure(2)
imshow(res.transpose(), aspect='auto')
.. todo:: a proper spectrogram class/function that takes care of normalisation
"""
x = np.array(x)
# array with 1 dimension case
if x.ndim == 1:
axis = 0
r = x.shape[0]
w = Window(r, window) #same size as input data
w = w.data
# matrix case
elif x.ndim == 2:
logging.debug('2D array. each row is a 1D array')
[r, c] = x.shape
w = np.array([Window(r, window).data for this in range(c)]).reshape(r,c)
if NFFT is None:
NFFT = len(x)
isreal = np.isrealobj(x)
if detrend == True:
m = np.mean(x, axis=axis)
else:
m = 0
if isreal == True:
if x.ndim == 2:
res = (abs (rfft (x*w - m, NFFT, axis=0))) ** 2. / r
else:
res = (abs (rfft (x*w - m, NFFT, axis=-1))) ** 2. / r
else:
if x.ndim == 2:
res = (abs (fft (x*w - m, NFFT, axis=0))) ** 2. / r
else:
res = (abs (fft (x*w - m, NFFT, axis=-1))) ** 2. / r
if scale_by_freq is True:
df = sampling / float(NFFT)
res*= 2 * np.pi / df
if x.ndim == 1:
return res.transpose()
else:
return res | 0.008657 |
def day(self):
'''set unit to day'''
self.magnification = 86400
self._update(self.baseNumber, self.magnification)
return self | 0.012739 |
def get_pdb_sets(self):
'''Return a record to be used for database storage. This only makes sense if self.id is set. See usage example
above.'''
assert(self.id != None)
data = []
for pdb_set in self.pdb_sets:
pdb_set_record = dict(
PPComplexID = self.id,
SetNumber = pdb_set['set_number'],
IsComplex = pdb_set['is_complex'],
Notes = pdb_set['notes'],
)
chain_records = []
for side, chain_details in sorted(pdb_set['chains'].iteritems()):
chain_records.append(dict(
PPComplexID = self.id,
SetNumber = pdb_set['set_number'],
Side = side,
ChainIndex = chain_details['chain_index'],
PDBFileID = chain_details['pdb_file_id'],
Chain = chain_details['chain_id'],
NMRModel = chain_details['nmr_model'],
))
data.append(dict(pdb_set = pdb_set_record, chain_records = chain_records))
return data | 0.027265 |
def consolidate(self, volume, source, dest, *args, **kwargs):
"""
Consolidate will move a volume of liquid from a list of sources
to a single target location. See :any:`Transfer` for details
and a full list of optional arguments.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', 'A3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.consolidate(50, plate.cols[0], plate[1]) # doctest: +SKIP
"""
kwargs['mode'] = 'consolidate'
kwargs['mix_before'] = (0, 0)
kwargs['air_gap'] = 0
kwargs['disposal_vol'] = 0
args = [volume, source, dest, *args]
return self.transfer(*args, **kwargs) | 0.002066 |
def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:
"""Share a me message into a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
text (str): The message you'd like to share. e.g. 'Hello world'
"""
kwargs.update({"channel": channel, "text": text})
return self.api_call("chat.meMessage", json=kwargs) | 0.007335 |
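A brief usage sketch, assuming an already-authenticated client; depending on the SDK version the import may instead be `from slack_sdk import WebClient`, and the token below is a placeholder:
from slack import WebClient

client = WebClient(token='xoxb-your-token-here')          # placeholder token
response = client.chat_meMessage(channel='C1234567890', text='is deploying the bot')
assert response['ok']                                     # Slack responses carry an "ok" flag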
def set(self, key, value, confidence=100):
"""
Defines the given value with the given confidence, unless the same
value is already defined with a higher confidence level.
"""
if value is None:
return
if key in self.info:
old_confidence, old_value = self.info.get(key)
if old_confidence >= confidence:
return
self.info[key] = (confidence, value) | 0.004425 |
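To make the confidence rule concrete, here is a small runnable stand-in that re-implements the same logic; the `InfoBag` class and the keys are made up for illustration:
class InfoBag(object):
    """Stand-in re-implementation of the confidence rule above."""
    def __init__(self):
        self.info = {}

    def set(self, key, value, confidence=100):
        if value is None:
            return
        if key in self.info and self.info[key][0] >= confidence:
            return  # an equal or higher confidence value already exists
        self.info[key] = (confidence, value)

bag = InfoBag()
bag.set('codec', 'mp3', confidence=50)    # stored as (50, 'mp3')
bag.set('codec', 'aac', confidence=40)    # ignored: 40 < 50
bag.set('codec', 'flac', confidence=90)   # replaces: 90 > 50
bag.set('codec', None)                    # ignored: None values are never stored
print(bag.info['codec'])                  # -> (90, 'flac')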
def public_notes_500(self, key, value):
"""Populate the ``public_notes`` key."""
return [
{
'source': value.get('9'),
'value': public_note,
} for public_note in force_list(value.get('a'))
] | 0.004149 |
def liftover_cpra(self, chromosome, position, verbose=False):
        """
        Given a chromosome and position in 1-based co-ordinates, use pyliftover
        to lift the position over to the target genome build. A unique,
        strand-preserving liftover returns a (chromosome, position) tuple;
        otherwise (None, None) is returned and the reason is logged when
        ``verbose`` is set.
        :param chromosome: string with the chromosome as it's represented in the from_genome
        :param position: position on chromosome (will be cast to int)
        :param verbose: log the reason when no liftover is possible
        :return: ((str) chromosome, (int) position), or (None, None) if no unique liftover exists
        """
chromosome = str(chromosome)
position = int(position)
# Perform the liftover lookup, shift the position by 1 as pyliftover deals in 0-based co-ords
new = self.liftover.convert_coordinate(chromosome, position - 1)
# This has to be here as new will be NoneType when the chromosome doesn't exist in the chainfile
if new:
# If the liftover is unique
if len(new) == 1:
# If the liftover hasn't changed strand
if new[0][2] == "+":
# Set the co-ordinates to the lifted-over ones and write out
new_chromosome = str(new[0][0])
# Shift the position forward by one to convert back to a 1-based co-ords
new_position = int(new[0][1]) + 1
return new_chromosome, new_position
else:
exception_string = "{},{} has a flipped strand in liftover: {}".format(chromosome, position, new)
else:
exception_string = "{},{} lifts over to multiple positions: {}".format(chromosome, position, new)
elif new is None:
exception_string = "Chromosome '{}' provided not in chain file".format(chromosome)
if verbose:
logging.error(exception_string)
return None, None | 0.005774 |
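For orientation, the lookup above can be reproduced directly with pyliftover; the genome builds 'hg19' and 'hg38' are only example values, and note the shift to 0-based co-ordinates going in and back to 1-based coming out:
from pyliftover import LiftOver

lo = LiftOver('hg19', 'hg38')                   # example builds; fetches the chain file
hits = lo.convert_coordinate('chr1', 999999)    # position 1,000,000 in 1-based co-ordinates
if hits and len(hits) == 1 and hits[0][2] == '+':
    print(hits[0][0], int(hits[0][1]) + 1)      # unique, strand-preserving hit, back to 1-based
else:
    print('no unique, strand-preserving liftover')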
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema | 0.004608 |
def dijkstra(G, start, weight='weight'):
    """
    Compute the shortest path length between start
    and all other reachable nodes in a weighted graph.
    return -> ({vertex: weight from start, }, {vertex: predecessor, })
    """
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
visited = {start: 0}
path = {}
vertices = set(G.vertices.keys())
while vertices:
min_vertex = None
for vertex in vertices:
if vertex in visited:
if min_vertex is None or visited[vertex] < visited[min_vertex]:
min_vertex = vertex
if min_vertex is None:
break
vertices.remove(min_vertex)
current_weight = visited[min_vertex]
for edge in G.vertices[min_vertex]:
edge_weight = current_weight + G.edges[(min_vertex, edge)][weight]
if edge not in visited or edge_weight < visited[edge]:
visited[edge] = edge_weight
path[edge] = min_vertex
return visited, path | 0.000914 |
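The function only relies on `G.vertices` (vertex -> iterable of neighbours) and `G.edges` ((u, v) -> attribute dict); the tiny stand-in graph below exists purely to show the expected shapes and the returned pair, and is not the project's real Graph class:
class _Graph(object):
    """Minimal stand-in exposing the two attributes dijkstra() reads."""
    def __init__(self):
        self.vertices = {}   # vertex -> list of neighbours
        self.edges = {}      # (u, v) -> {'weight': w}

    def add_edge(self, u, v, w):
        self.vertices.setdefault(u, []).append(v)
        self.vertices.setdefault(v, [])
        self.edges[(u, v)] = {'weight': w}

g = _Graph()
g.add_edge('a', 'b', 1)
g.add_edge('b', 'c', 2)
g.add_edge('a', 'c', 5)

distances, predecessors = dijkstra(g, 'a')
print(distances)     # {'a': 0, 'b': 1, 'c': 3}
print(predecessors)  # {'b': 'a', 'c': 'b'}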
def API_GET(self, courseid, taskid=None): # pylint: disable=arguments-differ
"""
List tasks available to the connected client. Returns a dict in the form
::
{
"taskid1":
                {
                    "name": "Name of the task", #the name of the task
                    "authors": [],
                    "deadline": "",
                    "status": "succeeded" # can be "notviewed", "notattempted", "succeeded" or "failed"
"grade": 0.0,
"grade_weight": 0.0,
"context": "" # context of the task, in RST
"problems": # dict of the subproblems
{
# see the format of task.yaml for the content of the dict. Contains everything but
# responses of multiple-choice and match problems.
}
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id, this dict will contain one entry or the page will return 404 Not
Found.
"""
try:
course = self.course_factory.get_course(courseid)
except:
raise APINotFound("Course not found")
if not self.user_manager.course_is_open_to_user(course, lti=False):
raise APIForbidden("You are not registered to this course")
if taskid is None:
tasks = course.get_tasks()
else:
try:
tasks = {taskid: course.get_task(taskid)}
except:
raise APINotFound("Task not found")
output = []
for taskid, task in tasks.items():
task_cache = self.user_manager.get_task_cache(self.user_manager.session_username(), task.get_course_id(), task.get_id())
data = {
"id": taskid,
"name": task.get_name(self.user_manager.session_language()),
"authors": task.get_authors(self.user_manager.session_language()),
"deadline": task.get_deadline(),
"status": "notviewed" if task_cache is None else "notattempted" if task_cache["tried"] == 0 else "succeeded" if task_cache["succeeded"] else "failed",
"grade": task_cache.get("grade", 0.0) if task_cache is not None else 0.0,
"grade_weight": task.get_grading_weight(),
"context": task.get_context(self.user_manager.session_language()).original_content(),
"problems": []
}
for problem in task.get_problems():
pcontent = problem.get_original_content()
pcontent["id"] = problem.get_id()
if pcontent["type"] == "match":
del pcontent["answer"]
if pcontent["type"] == "multiple_choice":
pcontent["choices"] = {key: val["text"] for key, val in enumerate(pcontent["choices"])}
pcontent = self._check_for_parsable_text(pcontent)
data["problems"].append(pcontent)
output.append(data)
return 200, output | 0.005348 |
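A hedged client-side sketch against the endpoint this handler serves; the base URL and the session cookie name are placeholders, since both depend on the deployment:
import requests

base = 'http://localhost:8080/api/v0'                 # placeholder deployment URL
cookies = {'sessionid': 'PLACEHOLDER'}                # cookie name is an assumption
resp = requests.get(base + '/courses/the_course_id/tasks', cookies=cookies)
if resp.status_code == 200:
    for task in resp.json():
        print(task['id'], task['name'], task['status'], task['grade'])
elif resp.status_code == 403:
    print('not registered to this course')
elif resp.status_code == 404:
    print('course or task not found')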
def append_formula(self, formula, no_return=True):
"""
This method can be used to add a given list of clauses into the
solver.
:param formula: a list of clauses.
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type formula: iterable(iterable(int))
:type no_return: bool
The ``no_return`` argument is set to ``True`` by default.
:rtype: bool if ``no_return`` is set to ``False``.
.. code-block:: python
>>> cnf = CNF()
... # assume the formula contains clauses
>>> s = Solver()
>>> s.append_formula(cnf.clauses, no_return=False)
True
"""
if self.solver:
res = self.solver.append_formula(formula, no_return)
if not no_return:
return res | 0.002092 |
def create_dir(directory):
"""Create given directory, if doesn't exist.
Parameters
----------
directory : string
Directory path (can be relative or absolute)
Returns
-------
string
Absolute directory path
"""
if not os.access(directory, os.F_OK):
os.makedirs(directory)
return os.path.abspath(directory) | 0.00271 |
def to_gpx(self):
"""Converts track to a GPX format
Uses GPXPY library as an intermediate format
Returns:
A string with the GPX/XML track
"""
gpx_segments = []
for segment in self.segments:
gpx_points = []
for point in segment.points:
time = ''
if point.time:
iso_time = point.time.isoformat().split('.')[0]
time = '<time>%s</time>' % iso_time
gpx_points.append(
u'<trkpt lat="%f" lon="%f">%s</trkpt>' % (point.lat, point.lon, time)
)
points = u'\n\t\t\t'.join(gpx_points)
gpx_segments.append(u'\t\t<trkseg>\n\t\t\t%s\n\t\t</trkseg>' % points)
segments = u'\t\n'.join(gpx_segments)
content = [
u'<?xml version="1.0" encoding="UTF-8"?>',
u'<gpx xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.topografix.com/GPX/1/0" xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd" version="1.0" creator="GatherMySteps">',
u'\t<trk>',
segments,
u'\t</trk>',
u'</gpx>'
]
return u'\n'.join(content) | 0.003876 |
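A short usage sketch; `track` is assumed to be a populated instance of the class that defines to_gpx():
import io

gpx_xml = track.to_gpx()
with io.open('track.gpx', 'w', encoding='utf-8') as handle:   # io.open keeps this Python 2/3 safe
    handle.write(gpx_xml)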
def lines(self):
'''Display the system bonds as lines.
'''
if "bonds" not in self.topology:
return
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']])
lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end],
'startColors': color_array[bond_start].tolist(),
'endColors': color_array[bond_end].tolist()})
def update(self=self, lines=lines):
bond_start, bond_end = zip(*self.topology['bonds'])
bond_start = np.array(bond_start)
bond_end = np.array(bond_end)
self.update_representation(lines, {'startCoords': self.coordinates[bond_start],
'endCoords': self.coordinates[bond_end]})
self.update_callbacks.append(update)
self.autozoom(self.coordinates) | 0.008244 |
def _find_class(self, class_name):
"Resolve the class from the name."
classes = {}
classes.update(globals())
classes.update(self.INSTANCE_CLASSES)
logger.debug(f'looking up class: {class_name}')
cls = classes[class_name]
logger.debug(f'found class: {cls}')
return cls | 0.006042 |
def build_catalog(self, body):
"""
Create the I{catalog} of multiref nodes by id and the list of
non-multiref nodes.
@param body: A soap envelope body node.
@type body: L{Element}
"""
for child in body.children:
if self.soaproot(child):
self.nodes.append(child)
id = child.get('id')
if id is None:
continue
key = '#%s' % id
self.catalog[key] = child | 0.004024 |
def hidelist(self, window_name, object_name):
"""
Hide combo box list / menu
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
object_handle.activate()
object_handle.sendKey(AXKeyCodeConstants.ESCAPE)
return 1 | 0.006107 |
def switch(self):
"""Switch if time for eAgc has come"""
t = self.system.dae.t
for idx in range(0, self.n):
if t >= self.tl[idx]:
if self.en[idx] == 0:
self.en[idx] = 1
logger.info(
'Extended ACE <{}> activated at t = {}.'.format(
self.idx[idx], t)) | 0.005089 |
def evaluate(self, text):
"""
Given a string of `text`, compute confusion matrix for the
classification task.
"""
cx = BinaryConfusion()
for (L, P, R, gold, _) in Detector.candidates(text):
guess = self.predict(L, P, R)
cx.update(gold, guess)
if not gold and guess:
logger.debug("False pos.: L='{}', R='{}'.".format(L, R))
elif gold and not guess:
logger.debug("False neg.: L='{}', R='{}'.".format(L, R))
return cx | 0.003636 |
def _connect(self, context):
"""Initialize the database connection."""
if __debug__:
log.info("Connecting " + self.engine.partition(':')[0] + " database layer.", extra=dict(
uri = redact_uri(self.uri, self.protect),
config = self.config,
alias = self.alias,
))
self.connection = context.db[self.alias] = self._connector(self.uri, **self.config) | 0.058201 |
def simpixel(new=0, autoraise=True):
"""Open an instance of simpixel in the browser"""
simpixel_driver.open_browser(new=new, autoraise=autoraise) | 0.012422 |
def get_parameter(self):
"""Obtain list parameter object from the current widget state.
:returns: A DefaultValueParameter from the current state of widget
:rtype: DefaultValueParameter
"""
radio_button_checked_id = self.input_button_group.checkedId()
# No radio button checked, then default value = None
if radio_button_checked_id == -1:
self._parameter.value = None
# The last radio button (custom) is checked, get the value from the
# line edit
elif radio_button_checked_id == len(self._parameter.options) - 1:
self._parameter.options[radio_button_checked_id] = \
self.custom_value.value()
self._parameter.value = self.custom_value.value()
else:
self._parameter.value = self._parameter.options[
radio_button_checked_id]
return self._parameter | 0.002169 |
def resumeProducing(self):
"""
Starts or resumes the retrieval of messages from the server queue.
This method starts receiving messages from the server, they will be
passed to the consumer callback.
.. note:: This is called automatically when :meth:`.consume` is called,
so users should not need to call this unless :meth:`.pauseProducing`
has been called.
Returns:
defer.Deferred: fired when the production is ready to start
"""
# Start consuming
self._running = True
for consumer in self._consumers.values():
queue_object, _ = yield consumer.channel.basic_consume(
queue=consumer.queue, consumer_tag=consumer.tag
)
deferred = self._read(queue_object, consumer)
            deferred.addErrback(
                lambda f: _legacy_twisted_log.msg(
                    "_read failed on consumer {c}",
                    c=consumer,
                    logLevel=logging.ERROR,
                )
            )
_legacy_twisted_log.msg("AMQP connection successfully established") | 0.002671 |
def forms(self):
"""
:rtype: twilio.rest.authy.v1.form.FormList
"""
if self._forms is None:
self._forms = FormList(self)
return self._forms | 0.010471 |
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
        while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e | 0.002757 |
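A hedged usage sketch with boto's legacy (boto 2) Route 53 interface; credentials are assumed to come from the usual environment variables:
import boto

conn = boto.connect_route53()
response = conn.get_all_hosted_zones()
for zone in response['ListHostedZonesResponse']['HostedZones']:
    print(zone['Name'], zone['Id'])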
def _ParseTimezoneOption(self, options):
"""Parses the timezone options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
time_zone_string = self.ParseStringOption(options, 'timezone')
if isinstance(time_zone_string, py2to3.STRING_TYPES):
if time_zone_string.lower() == 'list':
self.list_timezones = True
elif time_zone_string:
try:
pytz.timezone(time_zone_string)
except pytz.UnknownTimeZoneError:
raise errors.BadConfigOption(
'Unknown time zone: {0:s}'.format(time_zone_string))
self._preferred_time_zone = time_zone_string | 0.007022 |
def logout(self,
command='exit',
note=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
nonewline=False,
loglevel=logging.DEBUG):
"""Logs the user out. Assumes that login has been called.
If login has never been called, throw an error.
@param command: Command to run to log out (default=exit)
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.logout(ShutItSendSpec(shutit_pexpect_session,
send=command,
note=note,
timeout=timeout,
nonewline=nonewline,
loglevel=loglevel,
echo=echo)) | 0.037418 |
def p_global_var(p):
'''global_var : VARIABLE
| DOLLAR variable
| DOLLAR LBRACE expr RBRACE'''
if len(p) == 2:
p[0] = ast.Variable(p[1], lineno=p.lineno(1))
elif len(p) == 3:
p[0] = ast.Variable(p[2], lineno=p.lineno(1))
else:
p[0] = ast.Variable(p[3], lineno=p.lineno(1)) | 0.002874 |
def install_integration(self, id, **kwargs): # noqa: E501
"""Installs a Wavefront integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.install_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerIntegrationStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.install_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.install_integration_with_http_info(id, **kwargs) # noqa: E501
return data | 0.002208 |
def check_data_types(self, ds):
'''
Checks the data type of all netCDF variables to ensure they are valid
data types under CF.
CF §2.2 The netCDF data types char, byte, short, int, float or real, and
double are all acceptable
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
'''
fails = []
total = len(ds.variables)
for k, v in ds.variables.items():
if (v.dtype.kind != 'S' and
all(v.dtype.type != t for t in
(np.character, np.dtype('|S1'),
np.dtype('b'), np.dtype('i2'),
np.dtype('i4'), np.float32, np.double))):
fails.append('The variable {} failed because the datatype is {}'.format(k, v.datatype))
return Result(BaseCheck.HIGH, (total - len(fails), total), self.section_titles["2.2"], msgs=fails) | 0.006042 |
def __validate_email(self, email):
"""Checks if a string looks like an email address"""
e = re.match(self.EMAIL_ADDRESS_REGEX, email, re.UNICODE)
if e:
return email
else:
error = "Invalid email address: " + str(email)
msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
raise InvalidFormatError(cause=msg) | 0.005115 |
def read_data(self, blocksize=4096):
"""Generates byte strings reflecting the audio data in the file.
"""
frames = ctypes.c_uint(blocksize // self._client_fmt.mBytesPerFrame)
buf = ctypes.create_string_buffer(blocksize)
buflist = AudioBufferList()
buflist.mNumberBuffers = 1
buflist.mBuffers[0].mNumberChannels = \
self._client_fmt.mChannelsPerFrame
buflist.mBuffers[0].mDataByteSize = blocksize
buflist.mBuffers[0].mData = ctypes.cast(buf, ctypes.c_void_p)
while True:
check(_coreaudio.ExtAudioFileRead(
self._obj, ctypes.byref(frames), ctypes.byref(buflist)
))
assert buflist.mNumberBuffers == 1
size = buflist.mBuffers[0].mDataByteSize
if not size:
break
data = ctypes.cast(buflist.mBuffers[0].mData,
ctypes.POINTER(ctypes.c_char))
blob = data[:size]
yield blob | 0.001963 |
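Because read_data() is a generator of raw byte blocks, callers typically stream the blocks somewhere; a hedged sketch, assuming `audio_file` is an already-opened instance of the reader class this method belongs to:
total = 0
with open('decoded.pcm', 'wb') as out:
    for blob in audio_file.read_data(blocksize=8192):   # blocksize is an illustrative choice
        out.write(blob)
        total += len(blob)
print('wrote %d bytes of raw audio' % total)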
def pivot(self, attrlist):
"""Pivots the data using the given attributes, returning a L{PivotTable}.
@param attrlist: list of attributes to be used to construct the pivot table
@type attrlist: list of strings, or string of space-delimited attribute names
"""
if isinstance(attrlist, basestring):
attrlist = attrlist.split()
if all(a in self._indexes for a in attrlist):
return PivotTable(self, [], attrlist)
else:
raise ValueError("pivot can only be called using indexed attributes") | 0.010292 |
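A short sketch of the calling convention: the two calls below are equivalent, and both require the named attributes to be indexed on the (hypothetical) `sales` table:
by_region_product = sales.pivot(['region', 'product'])
by_region_product = sales.pivot('region product')   # space-delimited shorthand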