text (stringlengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def model_field_attr(model, model_field, attr):
"""
Returns the specified attribute for the specified field on the model class.
"""
fields = {field.name: field for field in model._meta.fields}
return getattr(fields[model_field], attr) | 0.003817 |
def execute(self, triple_map, output, **kwargs):
"""Executes the mapping between a CSV source and output RDF.
args:
triple_map(SimpleNamespace): Triple Map
output: graph to which the generated triples are added
"""
subject = self.generate_term(term_map=triple_map.subjectMap,
**kwargs)
start_size = len(output)
all_subjects = []
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
if pred_obj_map.template is not None:
object_ = self.generate_term(term_map=pred_obj_map, **kwargs)
if object_ and len(str(object_)) > 0:
output.add((
subject,
predicate,
object_))
if pred_obj_map.parentTriplesMap is not None:
self.__handle_parents__(
parent_map=pred_obj_map.parentTriplesMap,
subject=subject,
predicate=predicate,
**kwargs)
if pred_obj_map.reference is not None:
object_ = self.generate_term(term_map=pred_obj_map,
**kwargs)
if object_ and len(str(object_)) > 0:
output.add((subject, predicate, object_))
if pred_obj_map.constant is not None:
output.add((subject, predicate, pred_obj_map.constant))
finish_size = len(output)
if finish_size > start_size:
output.add((subject,
NS_MGR.rdf.type.rdflib,
triple_map.subjectMap.class_))
all_subjects.append(subject)
return all_subjects | 0.002277 |
def multiline_statement(line, previous_line=''):
"""Return True if this is part of a multiline statement."""
for symbol in '\\:;':
if symbol in line:
return True
sio = io.StringIO(line)
try:
list(tokenize.generate_tokens(sio.readline))
return previous_line.rstrip().endswith('\\')
except (SyntaxError, tokenize.TokenError):
return True | 0.0025 |
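A minimal usage sketch for `multiline_statement`, assuming the module-level `io` and `tokenize` imports the function relies on; the sample lines are hypothetical.

```python
import io
import tokenize

# Lines containing '\', ':' or ';' are treated as part of a multiline statement.
print(multiline_statement("if x:"))                # True: contains ':'
print(multiline_statement("x = [1,"))              # True: tokenize fails on the unclosed bracket
print(multiline_statement("y = 2", "x = 1 + \\"))  # True: previous line ends with '\'
print(multiline_statement("z = 3"))                # False: a complete single-line statement
```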
def parsexml(self, xmlstring, modules, source=None):
"""Parses the docstrings out of the specified XML string.
:arg xmlstring: the XML content to parse the docstrings from.
:arg modules: the modules whose members the parsed docstrings decorate.
:arg source: the path to the file from which the XML string was extracted.
"""
result = {}
from fortpy.utility import XML_fromstring
xmlroot = XML_fromstring(xmlstring, source)
if xmlroot.tag == "fortpy" and "mode" in xmlroot.attrib and \
xmlroot.attrib["mode"] == "docstring":
#First, cycle through the children to find the <globals> tag (if any
#exist). Its children will apply to any of the other tags we find
#and we will have to update their attributes accordingly.
xmlglobals = {}
for child in xmlroot.iterfind("globals"):
_update_globals(list(child), xmlglobals)
_set_global_defaults(xmlglobals)
#We fill the dictionary with decorated names as keys and lists
#of the xml docstring elements as values.
for child in xmlroot:
if child.tag == "globals":
continue
xmltags = []
if child.tag == "decorates" and "name" in child.attrib:
decorates = child.attrib["name"]
xmltags.extend(list(child))
elif "decorates" in child.attrib:
decorates = child.attrib["decorates"]
xmltags.append(child)
for xtag in xmltags:
_update_from_globals(xtag, xmlglobals, child)
if decorates in result:
result[decorates].extend(xmltags)
else:
result[decorates] = xmltags
#Loop through all the docstrings we found and team them up with
#their respective module members.
self._xml_update_modules(result, modules) | 0.005744 |
def resolve(self, host: str) -> ResolveResult:
'''Resolve hostname.
Args:
host: Hostname.
Returns:
Resolved IP addresses.
Raises:
DNSNotFound if the hostname could not be resolved or
NetworkError if there was an error connecting to DNS servers.
Coroutine.
'''
_logger.debug(__('Lookup address {0}.', host))
try:
host = self.hook_dispatcher.call(PluginFunctions.resolve_dns, host
) or host
except HookDisconnected:
pass
cache_key = (host, self._family)
if self._cache and cache_key in self._cache:
resolve_result = self._cache[cache_key]
_logger.debug(__('Return by cache {0}.', resolve_result))
if self._rotate:
resolve_result.rotate()
return resolve_result
address_infos = []
dns_infos = []
if not self.dns_python_enabled:
families = ()
elif self._family == IPFamilyPreference.any:
families = (socket.AF_INET, socket.AF_INET6)
elif self._family == IPFamilyPreference.ipv4_only:
families = (socket.AF_INET, )
else:
families = (socket.AF_INET6, )
for family in families:
datetime_now = datetime.datetime.utcnow()
try:
answer = yield from self._query_dns(host, family)
except DNSNotFound:
continue
else:
dns_infos.append(DNSInfo(datetime_now, answer.response.answer))
address_infos.extend(self._convert_dns_answer(answer))
if not address_infos:
# Maybe the address is defined in hosts file or mDNS
if self._family == IPFamilyPreference.any:
family = socket.AF_UNSPEC
elif self._family == IPFamilyPreference.ipv4_only:
family = socket.AF_INET
else:
family = socket.AF_INET6
results = yield from self._getaddrinfo(host, family)
address_infos.extend(self._convert_addrinfo(results))
_logger.debug(__('Resolved addresses: {0}.', address_infos))
resolve_result = ResolveResult(address_infos, dns_infos)
if self._cache:
self._cache[cache_key] = resolve_result
self.event_dispatcher.notify(PluginFunctions.resolve_dns_result, host, resolve_result)
if self._rotate:
resolve_result.shuffle()
return resolve_result | 0.00115 |
def html2vtml(vtmarkup):
""" Convert hypertext markup into vt markup.
The output can be given to `vtmlrender` for conversion to VT100
sequences. """
try:
htmlconv.feed(vtmarkup)
htmlconv.close()
return htmlconv.getvalue()
finally:
htmlconv.reset() | 0.003333 |
def _is_flag(cls, arg):
"""Check if an argument is a flag.
A flag starts with - or -- and the next character must be a letter
followed by letters, numbers, - or _. Currently we only check that the
first non-dash character is alphabetic, to make sure we're not just
looking at a negative number.
Returns:
bool: Whether the argument is a flag.
"""
if arg in ('-', '--'):
return False
if not arg.startswith('-'):
return False
if arg.startswith('--'):
first_char = arg[2]
else:
first_char = arg[1]
if not first_char.isalpha():
return False
return True | 0.00277 |
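A hedged sketch of how this classifier behaves, treating `_is_flag` as a plain function and passing a placeholder for `cls`, which the body never uses.

```python
print(_is_flag(None, '--verbose'))  # True: long flag, first non-dash char is a letter
print(_is_flag(None, '-x'))         # True: short flag
print(_is_flag(None, '-5'))         # False: looks like a negative number
print(_is_flag(None, '--'))         # False: bare separator
print(_is_flag(None, 'value'))      # False: does not start with '-'
```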
def convert_to_raw(number, value, length):
"""
Get the raw value of an option.
:param number: the option number
:param value: the option value
:param length: the option length
:return: the raw option value (bytes, str, int or bytearray depending on the option type)
"""
opt_type = defines.OptionRegistry.LIST[number].value_type
if length == 0 and opt_type != defines.INTEGER:
return bytes()
elif length == 0 and opt_type == defines.INTEGER:
return 0
elif opt_type == defines.STRING:
if isinstance(value, bytes):
return value.decode("utf-8")
elif opt_type == defines.OPAQUE:
if isinstance(value, bytes):
return value
else:
return bytes(value, "utf-8")
if isinstance(value, tuple):
value = value[0]
if isinstance(value, str):
return bytearray(value, "utf-8")
elif isinstance(value, int):
return value
else:
return bytearray(value) | 0.001742 |
def generate_key_pair(size=2048, public_exponent=65537, as_string=True):
"""
Generate a public/private key pair.
:param size: Optional. Describes how many bits long the key should be, larger keys provide more security,
currently 1024 and below are considered breakable, and 2048 or 4096 are reasonable default
key sizes for new keys. Defaults to 2048.
:param public_exponent: Optional. Indicates what one mathematical property of the key generation will be.
65537 is the default and should almost always be used.
:param as_string: Optional. If True, return tuple of strings. If false, return tuple of RSA key objects.
Defaults to True.
:return: (PrivateKey<string>, PublicKey<string>)
:return: (
`RSAPrivateKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey>`_,
`RSAPublicKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey>`_)
"""
private = rsa.generate_private_key(
public_exponent=public_exponent,
key_size=size,
backend=default_backend()
)
public = private.public_key()
if not as_string:
return private, public
pem_private = private.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()).decode(ENCODING)
pem_public = public.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(ENCODING)
return pem_private, pem_public | 0.005795 |
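A usage sketch, assuming the names the function relies on (`rsa`, `default_backend`, the serialization classes, and a module-level `ENCODING` constant) come from the `cryptography` package as imported below.

```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import (
    Encoding, NoEncryption, PrivateFormat, PublicFormat)

ENCODING = 'utf-8'  # assumed module-level constant used when decoding the PEM bytes

# Default: a pair of PEM-encoded strings
pem_private, pem_public = generate_key_pair()
print(pem_public.splitlines()[0])        # -----BEGIN PUBLIC KEY-----

# Key objects instead of strings
private_key, public_key = generate_key_pair(as_string=False)
print(private_key.key_size)              # 2048
```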
def record(self, ekey, entry, diff=False):
"""Records the specified entry to the key-value store under the specified
entity key.
Args:
ekey (str): fqdn/uuid of the method/object to store the entry for.
entry (dict): attributes and values gleaned from the execution.
diff (bool): when True, the "c" element of `entry` will be diffed
against previous entries under the same `ekey` if their method
(attribute "m") matches.
"""
if ekey not in self.entities:
self.entities[ekey] = []
#See if we need to diff the code to compress it.
if diff and len(self.entities[ekey]) > 0:
#Compress the code element of the current entry that we are saving.
from acorn.logging.diff import cascade, compress
sequence = [e["c"] for e in self.entities[ekey]
if e["m"] == entry["m"]]
original = cascade(sequence)
difference = compress(original, entry["c"])
#Now, overwrite the entry with the compressed version.
entry["c"] = difference
self.entities[ekey].append(entry)
#We also need to make sure we have uuids and origin information stored
#for any uuids present in the parameter string.
from uuid import UUID
uid = None
if entry["r"] is not None:
uid = entry["r"]
elif isinstance(ekey, str):
#For many methods we don't duplicate the UUID in the returns part
#because it wastes space. In those cases, the ekey is a UUID.
try:
uid = str(UUID(ekey))
except ValueError: # pragma: no cover
pass
if uid is not None and isinstance(uid, str):
self.log_uuid(uid)
#For the markdown and function definitions, we don't have any arguments,
#so we set that to None to save space.
if entry["a"] is None:
return
for larg in entry["a"]["_"]:
#We use the constructor to determine if the format of the argument
#is a valid UUID; if it isn't then we catch the error and keep
#going.
if not isinstance(larg, str):
continue
try:
uid = str(UUID(larg))
self.log_uuid(uid)
except ValueError:
#This was obviously not a UUID, we don't need to worry about it,
#it has a user-readable string instead.
pass
#We also need to handle the keyword arguments; these are keyed by name.
for key, karg in entry["a"].items():
if key == "_" or not isinstance(karg, str):
#Skip the positional arguments since we already handled them.
continue
try:
uid = str(UUID(karg))
self.log_uuid(uid)
except ValueError:
pass | 0.008189 |
def collapse_entrez_equivalencies(graph: BELGraph):
"""Collapse all equivalence edges away from Entrez. Assumes well formed, 2-way equivalencies."""
relation_filter = build_relation_predicate(EQUIVALENT_TO)
source_namespace_filter = build_source_namespace_filter(['EGID', 'EG', 'ENTREZ'])
edge_predicates = [
relation_filter,
source_namespace_filter,
]
_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates) | 0.00641 |
def save_stream(self, key, binary=False):
"""
Yield a writable file object representing `key`
:param str key: The file to save to
:param bool binary: Whether we should treat it as binary
:return: a writable file object, yielded so this can be used as a context manager
"""
mode = 'wb' if binary else 'w'
with open(os.path.join(self.uri, key), mode) as f:
yield f | 0.005602 |
def _harvest_validate(self, userkwargs):
"""Validate and plant user-provided arguments
- Go through and plant the seedlings
for any user arguments provided.
- Validate the arguments, cleaning and adapting them (valideer-wise)
- Extract negated "!" arguments
"""
# the valideer parser used to parse the
# user arguments when watering
parser = {}
userkwargs.update(self.network_kwargs)
# a simple set of original provided argument keys (used in IGNORES)
original_kwargs = set(map(lambda k: k.split('_')[1] if k.find('_')>-1 else k, userkwargs.keys()))
# list of columns that are required from seeds
requires = []
# -------------
# Clean up Aggs
# -------------
for key in userkwargs.keys():
# agg example: "avg_total", "max_tax"
if key.find('_') > 0:
agg, base = tuple(key.split('_'))
if base in userkwargs:
if type(userkwargs[base]) is not list:
userkwargs[base] = [(None, userkwargs[base])]
userkwargs[base].append( (agg, userkwargs.pop(key)) )
else:
userkwargs[base] = [(agg, userkwargs.pop(key))]
# -----------------
# Process Arguments
# -----------------
for key, seed in self.arguments.iteritems():
# --------------
# Argument Alias
# --------------
if seed.get('alias') and key in userkwargs:
# pop the value from the user kwargs (to change the key later)
value = userkwargs.pop(key) if key in userkwargs else NotImplemented
# for duplicate keys
oldkey = key+""
# change the key
key = seed.get('alias')
# change the seed
seed = get(self.arguments, seed.get('alias'))
# set the new key:value
if value is not NotImplemented:
if key in userkwargs:
raise valideer.ValidationError("Argument alias already specified for `%s` via `%s`" % (oldkey, key), oldkey)
userkwargs[key] = value
# can provide multiple arguments
if key.endswith('[]'):
multi = True
key = key[:-2]
else:
multi = False
# get value(s) from user
if key in userkwargs:
value = userkwargs.pop(key)
elif seed.get('copy'):
value = userkwargs.get(seed.get('copy'))
else:
value = seed.get('default')
# no argument provided, let's continue
if value is None or value == []:
if seed.get('required'):
raise valideer.ValidationError("missing required property: %s" % key, key)
else:
continue
# add requires
requires.extend(array(get(seed, 'requires', [])))
# -----------
# Inheritance
# -----------
# not permitted from arguments yet. would need to happen above the "Process Arguments" block
# self._inherit(*array(get(seed, 'inherit', [])))
if type(value) is list and type(value[0]) is tuple:
# complex
for v in value:
ud, pd = self._harvest_args(key, seed, v, multi)
userkwargs.update(ud)
parser.update(pd)
else:
ud, pd = self._harvest_args(key, seed, value, multi)
userkwargs.update(ud)
parser.update(pd)
# ------------
# Ignored Keys
# ------------
for seed in self.seeds:
ignores = set(array(get(seed, 'ignore')))
if ignores:
if ignores & original_kwargs:
if not get(seed, 'silent'):
additionals = ignores & original_kwargs
raise valideer.ValidationError("additional properties: %s" % ",".join(additionals), additionals)
[userkwargs.pop(key) for key in ignores if key in userkwargs]
# -------------------------
# Custom Operators (part 1)
# -------------------------
operators = {}
for key, value in userkwargs.items():
rk = key
agg = None
if key.find('_')>-1:
agg, rk = tuple(key.split('_'))
seed = self.arguments.get(rk, self.arguments.get(rk+'[]'))
if seed:
if type(value) is list:
operators[key] = []
# need to remove the operator for validating
new_values = []
for v in value:
operator, v = self._operator(v, *seed.get('column', "").rsplit("::", 1))
new_values.append(v)
operators[key].append((agg, operator) if agg else operator)
userkwargs[key] = new_values
else:
operator, value = self._operator(value, *seed.get('column', "").rsplit("::", 1))
operators[key] = (agg, operator) if agg else operator
userkwargs[key] = value
# -----------------
# Plant Sort Method
# -----------------
if 'sortby' in userkwargs:
seed = self.arguments.get(userkwargs['sortby'].lower(), self.arguments.get(userkwargs['sortby'].lower()+'[]'))
if seed:
seed['id'] = str(userkwargs['sortby'].lower())
for r in set(requires):
if userkwargs.get(r) is None:
raise valideer.ValidationError("required property not set: %s" % r, r)
# --------
# Validate
# --------
parser = valideer.parse(parser, additional_properties=False)
validated = parser.validate(userkwargs, adapt=self.navigator.adapter())
validated.update(self.network_kwargs)
# operators validated
# --------------------------- | --------------------------------
# { {
# "type": ["!", "!"], "type": ['a', 'b'],
# "total": "<", "total": "50",
# "tax": ("avg, ">"), "tax": "1",
# "time": None "time": "2014"
# } }
return operators, validated | 0.002554 |
def _action(self, res):
"""Returns JSON response or raise exception if errors are present"""
try:
j = res.json()
except ValueError:
res.raise_for_status()
j = {}
if 'Retry-After' in res.headers:
raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
if 'require_login' in j:
raise HTTPError('403 Forbidden: API key is incorrect for this domain')
if 'error' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
res.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j | 0.006689 |
def GetFormattedField(self, event, field_name):
"""Formats the specified field.
Args:
event (EventObject): event.
field_name (str): name of the field.
Returns:
str: value of the field.
"""
callback_name = self._FIELD_FORMAT_CALLBACKS.get(field_name, None)
callback_function = None
if callback_name:
callback_function = getattr(self, callback_name, None)
if callback_function:
output_value = callback_function(event)
else:
output_value = getattr(event, field_name, '-')
if output_value is None:
output_value = '-'
elif not isinstance(output_value, py2to3.STRING_TYPES):
output_value = '{0!s}'.format(output_value)
return output_value | 0.008174 |
def connect(self):
"""
Connects to publisher
"""
self.client = redis.Redis(
host=self.host, port=self.port, password=self.password) | 0.011429 |
def get_valid_https_verify(value):
'''
Get a value that can be the boolean representation of a string
or a boolean itself and return it as a boolean.
If this is not the case, it returns a string.
:value: The HTTPS_verify input value. A string can be passed as a path
to a CA_BUNDLE certificate
:returns: True, False or a string.
'''
http_verify_value = value
bool_values = {'false': False, 'true': True}
if isinstance(value, bool):
http_verify_value = value
elif (isinstance(value, str) or isinstance(value, unicode)) and value.lower() in bool_values.keys():
http_verify_value = bool_values[value.lower()]
return http_verify_value | 0.002817 |
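A short sketch of the expected conversions; the CA bundle path is hypothetical, and Python 2 is assumed given the `unicode` check in the function.

```python
print(get_valid_https_verify(True))               # True (already a bool)
print(get_valid_https_verify('false'))            # False (string form of a bool)
print(get_valid_https_verify('TRUE'))             # True (case-insensitive)
print(get_valid_https_verify('/etc/ssl/ca.pem'))  # '/etc/ssl/ca.pem' (path returned unchanged)
```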
def create_relationships(cls, id, related_collection_name, request_json):
r"""
Used to create relationship(s) between the id node and the nodes identified in the included resource \
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
if type(related_collection) in (One, ZeroOrOne): # Cardinality <= 1 so update_relationship should be used
r = application_codes.error_response([application_codes.FORBIDDEN_VIOLATION])
else:
data = request_json['data']
for rsrc_identifier in data:
the_new_node = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(id=rsrc_identifier['id'])
rel_attrs = rsrc_identifier.get('meta')
if not rel_attrs or isinstance(rel_attrs, dict):
related_collection.connect(the_new_node, rel_attrs)
else:
raise WrongTypeError
#r = this_resource.relationship_collection_response(related_collection_name)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, TypeError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
except AttemptedCardinalityViolation:
r = application_codes.error_response([application_codes.ATTEMPTED_CARDINALITY_VIOLATION])
except MultipleNodesReturned:
r = application_codes.error_response([application_codes.MULTIPLE_NODES_WITH_ID_VIOLATION])
return r | 0.006219 |
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step))) | 0.005727 |
def astype(self, dtype, copy=True):
"""Return a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res | 0.002094 |
async def read_frame(self) -> DataFrame:
"""Read a single frame from the local buffer.
If no frames are available but the stream is still open, waits until
more frames arrive. Otherwise, raises StreamConsumedError.
When a stream is closed, a single `None` is added to the data frame
Queue to wake up any waiting `read_frame` coroutines.
"""
if self._data_frames.qsize() == 0 and self.closed:
raise StreamConsumedError(self.id)
frame = await self._data_frames.get()
self._data_frames.task_done()
if frame is None:
raise StreamConsumedError(self.id)
return frame | 0.002967 |
def addRelation(self, link):
'''Appends Relation
'''
if isinstance(link, Relation):
self.internalLinks.append(link)
else:
raise TypeError(
'link type should be Relation, not %s' % type(link))
def count_discussions_handler(sender, **kwargs):
"""
Update the count of each type of discussion on an entry.
"""
if kwargs.get('instance') and kwargs.get('created'):
# The signal is emitted by the comment creation,
# so we do nothing, comment_was_posted is used instead.
return
comment = 'comment' in kwargs and kwargs['comment'] or kwargs['instance']
entry = comment.content_object
if isinstance(entry, Entry):
entry.comment_count = entry.comments.count()
entry.pingback_count = entry.pingbacks.count()
entry.trackback_count = entry.trackbacks.count()
entry.save(update_fields=[
'comment_count', 'pingback_count', 'trackback_count']) | 0.001364 |
def set_connect_args(self, dsn=None, **connect_kwargs):
r"""Set the new connection arguments for this pool.
The new connection arguments will be used for all subsequent
new connection attempts. Existing connections will remain until
they expire. Use :meth:`Pool.expire_connections()
<asyncpg.pool.Pool.expire_connections>` to expedite the connection
expiry.
:param str dsn:
Connection arguments specified using as a single string in
the following format:
``postgres://user:pass@host:port/database?option=value``.
:param \*\*connect_kwargs:
Keyword arguments for the :func:`~asyncpg.connection.connect`
function.
.. versionadded:: 0.16.0
"""
self._connect_args = [dsn]
self._connect_kwargs = connect_kwargs
self._working_addr = None
self._working_config = None
self._working_params = None | 0.002055 |
def setQuery( self, query ):
"""
Sets the fixed lookup query for this widget to the given query.
:param query | <orb.Query>
"""
self._query = query
if ( not self.signalsBlocked() ):
self.queryChanged.emit(query) | 0.023729 |
def match_local(self, prefix, includes, excludes):
"""
Filters os.walk() with include and exclude patterns.
See: http://stackoverflow.com/a/5141829/93559
"""
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
matches = []
for root, dirs, files in os.walk(prefix, topdown=True):
# exclude dirs
dirs[:] = [os.path.join(root, d) for d in dirs]
dirs[:] = [d for d in dirs if not re.match(excludes_pattern,
d.split(root)[1])]
# exclude/include files
files = [os.path.join(root, f) for f in files]
files = [f for f in files
if not re.match(excludes_pattern, f)]
files = [f for f in files
if re.match(includes_pattern, f.split(prefix)[1])]
for fname in files:
matches.append(fname)
return matches | 0.002703 |
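The include/exclude matching above is built on `fnmatch.translate`, which turns shell-style globs into regular expressions. A minimal standalone sketch of that idea, with hypothetical patterns:

```python
import fnmatch
import re

includes = ['*.html', '*.css']
excludes = ['*.tmp']

# Same construction as in match_local: "$." matches nothing when excludes is empty.
includes_pattern = r"|".join(fnmatch.translate(x) for x in includes)
excludes_pattern = r"|".join(fnmatch.translate(x) for x in excludes) or r"$."

for name in ['index.html', 'style.css', 'draft.tmp', 'notes.txt']:
    keep = re.match(includes_pattern, name) and not re.match(excludes_pattern, name)
    print(name, bool(keep))  # index.html True, style.css True, draft.tmp False, notes.txt False
```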
def configure_ghostboxes(self, nghostx=0, nghosty=0, nghostz=0):
"""
Initialize the ghost boxes.
This function only needs to be called if boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes).
"""
clibrebound.nghostx = c_int(nghostx)
clibrebound.nghosty = c_int(nghosty)
clibrebound.nghostz = c_int(nghostz)
return | 0.011923 |
def inspect_image(self, image_name, image_tag=''):
'''
a method to retrieve the settings of an image
:param image_name: string with name or id of image
:param image_tag: [optional] string with tag associated with image
:return: dictionary of settings of image
{ TOO MANY TO LIST }
'''
title = '%s.inspect_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_name': image_name,
'image_tag': image_tag
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# determine system command argument
sys_arg = image_name
if image_tag:
sys_arg += ':%s' % image_tag
# run inspect command
import json
sys_command = 'docker inspect %s' % sys_arg
output_dict = json.loads(self.command(sys_command))
image_settings = output_dict[0]
return image_settings | 0.003454 |
def _make_pull_imethod_resp(objs, eos, context_id):
"""
Create the correct imethod response for the open and pull methods
"""
eos_tup = (u'EndOfSequence', None, eos)
enum_ctxt_tup = (u'EnumerationContext', None, context_id)
return [("IRETURNVALUE", {}, objs), enum_ctxt_tup, eos_tup] | 0.006024 |
def process_includes(self, markdown_file_path: Path, content: str) -> str:
'''Replace all include statements with the respective file contents.
:param markdown_file_path: Path to currently processed Markdown file
:param content: Markdown content
:returns: Markdown content with resolved includes
'''
markdown_file_path = markdown_file_path.resolve()
self.logger.debug(f'Processing Markdown file: {markdown_file_path}')
processed_content = ''
include_statement_pattern = re.compile(
rf'((?<!\<)\<{"|".join(self.tags)}(?:\s[^\<\>]*)?\>.*?\<\/{"|".join(self.tags)}\>)',
flags=re.DOTALL
)
content_parts = include_statement_pattern.split(content)
for content_part in content_parts:
include_statement = self.pattern.fullmatch(content_part)
if include_statement:
body = self._tag_body_pattern.match(include_statement.group('body').strip())
options = self.get_options(include_statement.group('options'))
self.logger.debug(f'Processing include statement; body: {body}, options: {options}')
if body.group('repo'):
repo = body.group('repo')
repo_from_alias = self.options['aliases'].get(repo)
revision = None
if repo_from_alias:
self.logger.debug(f'Alias found: {repo}, resolved as: {repo_from_alias}')
if '#' in repo_from_alias:
repo_url, revision = repo_from_alias.split('#', maxsplit=1)
else:
repo_url = repo_from_alias
else:
repo_url = repo
if body.group('revision'):
revision = body.group('revision')
self.logger.debug(f'Highest priority revision specified in the include statement: {revision}')
self.logger.debug(f'File in Git repository referenced; URL: {repo_url}, revision: {revision}')
repo_path = self._sync_repo(repo_url, revision)
self.logger.debug(f'Local path of the repo: {repo_path}')
included_file_path = repo_path / body.group('path')
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
processed_content_part = self._process_include(
included_file_path,
body.group('from_heading'),
body.group('to_heading'),
options
)
else:
self.logger.debug('Local file referenced')
included_file_path = self._get_included_file_path(body.group('path'), markdown_file_path)
self.logger.debug(f'Resolved path to the included file: {included_file_path}')
processed_content_part = self._process_include(
included_file_path,
body.group('from_heading'),
body.group('to_heading'),
options
)
if self.options['recursive'] and self.pattern.search(processed_content_part):
self.logger.debug('Recursive call of include statements processing')
processed_content_part = self.process_includes(included_file_path, processed_content_part)
if options.get('inline'):
self.logger.debug('Processing included content part as inline')
processed_content_part = re.sub(r'\s+', ' ', processed_content_part).strip()
else:
processed_content_part = content_part
processed_content += processed_content_part
return processed_content | 0.00426 |
def profile():
"""View for editing a profile."""
# Create forms
verification_form = VerificationForm(formdata=None, prefix="verification")
profile_form = profile_form_factory()
# Process forms
form = request.form.get('submit', None)
if form == 'profile':
handle_profile_form(profile_form)
elif form == 'verification':
handle_verification_form(verification_form)
return render_template(
current_app.config['USERPROFILES_PROFILE_TEMPLATE'],
profile_form=profile_form,
verification_form=verification_form,) | 0.001721 |
def children(self):
"""Get the child topics of this topic.
:return: the child topics of this topic, returned as a generator
:rtype: Topic.Iterable
"""
self._make_soup()
child_topic_tag = self.soup.find('div', class_='child-topic')
if child_topic_tag is None:
return []
elif '共有' not in child_topic_tag.contents[-2].text:
for topic_tag in child_topic_tag.div.find_all('a'):
yield Topic(Zhihu_URL + topic_tag['href'],
topic_tag.text.strip(),
session=self._session)
else:
flag = 'load'
child = ''
data = {'_xsrf': self.xsrf}
params = {
'parent': self.id
}
while flag == 'load':
params['child'] = child
res = self._session.post(Topic_Get_Children_Url,
params=params, data=data)
j = map(lambda x: x[0], res.json()['msg'][1])
*topics, last = j
for topic in topics:
yield Topic(Zhihu_URL + '/topic/' + topic[2], topic[1],
session=self._session)
flag = last[0]
child = last[2]
if flag == 'topic':
yield Topic(Zhihu_URL + '/topic/' + last[2], last[1],
session=self._session) | 0.001401 |
def __forall_files(file_paths, output_dir, op):
"""
Applies a function to a set of files and an output directory.
:param list[str] file_paths: Absolute file paths to process
:param str output_dir: Output directory
:param op: callable invoked as ``op(file_path, dest)`` for each file
"""
for file_path in file_paths:
if not file_path.startswith('/'):
raise ValueError('Path provided (%s) is relative not absolute.' % file_path)
dest = os.path.join(output_dir, os.path.basename(file_path))
op(file_path, dest) | 0.004032 |
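A hedged usage sketch, treating `__forall_files` as a module-level helper: `op` is any callable taking `(source, destination)`, e.g. `shutil.copy` or `shutil.move`. The paths below are hypothetical.

```python
import os
import shutil

file_paths = ['/tmp/report.csv', '/tmp/summary.txt']   # hypothetical absolute paths
output_dir = '/tmp/processed'
os.makedirs(output_dir, exist_ok=True)

__forall_files(file_paths, output_dir, shutil.copy)    # copy each file into output_dir
# __forall_files(file_paths, output_dir, shutil.move)  # ...or move them instead
```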
def tokenize_ofp_instruction_arg(arg):
"""
Tokenize an argument portion of ovs-ofctl style action string.
"""
arg_re = re.compile("[^,()]*")
try:
rest = arg
result = []
while len(rest):
m = arg_re.match(rest)
if m.end(0) == len(rest):
result.append(rest)
return result
if rest[m.end(0)] == '(':
this_block, rest = _tokenize_paren_block(
rest, m.end(0) + 1)
result.append(this_block)
elif rest[m.end(0)] == ',':
result.append(m.group(0))
rest = rest[m.end(0):]
else: # is ')'
raise Exception
if len(rest):
assert rest[0] == ','
rest = rest[1:]
return result
except Exception:
raise ryu.exception.OFPInvalidActionString(action_str=arg) | 0.001071 |
def get_db_row(db, start, size):
"""
Here you see an example of reading out a part of a DB
Args:
db (int): The db to use
start (int): The index of where to start in db data
size (int): The size of the db data to read
"""
type_ = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte]
data = client.db_read(db, start, type_, size)
# print_row(data[:60])
return data | 0.002326 |
def register_provider(self, provider):
'''
Register a :class:`skosprovider.providers.VocabularyProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
to register.
:raises RegistryException: A provider with this id or uri has already
been registered.
'''
if provider.get_vocabulary_id() in self.providers:
raise RegistryException(
'A provider with this id has already been registered.'
)
self.providers[provider.get_vocabulary_id()] = provider
if provider.concept_scheme.uri in self.concept_scheme_uri_map:
raise RegistryException(
'A provider with URI %s has already been registered.' % provider.concept_scheme.uri
)
self.concept_scheme_uri_map[provider.concept_scheme.uri] = provider.get_vocabulary_id() | 0.005495 |
def get_releases(self, url='', headers={}, repo_name=''):
"""
Retrieves the releases for the given repo in JSON.
"""
url_releases = (url + '/releases')
r = requests.get(url_releases, headers=headers)
self.releases_json[repo_name] = r.json() | 0.006944 |
def top_priority_effect(effects):
"""
Given a collection of variant transcript effects,
return the top priority object. ExonicSpliceSite variants require special
treatment since they actually represent two effects -- the splicing modification
and whatever else would happen to the exonic sequence if nothing else gets
changed. In cases where multiple transcripts give rise to multiple
effects, use a variety of filtering and sorting heuristics to pick
the canonical transcript.
"""
if len(effects) == 0:
raise ValueError("List of effects cannot be empty")
effects = map(
select_between_exonic_splice_site_and_alternate_effect,
effects)
effects_grouped_by_gene = apply_groupby(
effects, fn=gene_id_of_associated_transcript, skip_none=False)
if None in effects_grouped_by_gene:
effects_without_genes = effects_grouped_by_gene.pop(None)
else:
effects_without_genes = []
# if we had any effects associated with genes then choose one of those
if len(effects_grouped_by_gene) > 0:
effects_with_genes = [
top_priority_effect_for_single_gene(gene_effects)
for gene_effects in effects_grouped_by_gene.values()
]
return max(effects_with_genes, key=multi_gene_effect_sort_key)
else:
# if all effects were without genes then choose the best among those
assert len(effects_without_genes) > 0
return max(effects_without_genes, key=multi_gene_effect_sort_key) | 0.001302 |
def _get_xml(self, close_tag=True):
"""
generate the xml string representation.
:param close_tag: should the '</provenance_step>' tag be added or not.
:type close_tag: bool
:return: the xml
:rtype: str
"""
provenance_step_element = Element('provenance_step', {
'timestamp': self.time.isoformat()
})
title = SubElement(provenance_step_element, 'title')
title.text = self.title
description = SubElement(provenance_step_element, 'description')
description.text = self.description
xml_string = tostring(provenance_step_element, 'unicode')
if close_tag:
return xml_string
else:
# Remove the close tag
return xml_string[:-len('</provenance_step>')] | 0.00243 |
def kml_region(map_source, z, x, y):
"""KML region fetched by a Google Earth network link. """
map = app.config["mapsources"][map_source]
kml_doc = KMLRegion(app.config["url_formatter"], map, app.config["LOG_TILES_PER_ROW"],
z, x, y)
return kml_response(kml_doc) | 0.006623 |
def format_iso_datetime(datetime_obj):
"""Formats the given datetime as a UTC-zoned ISO 8601 date string.
:param datetime_obj: The datetime object.
:type datetime_obj: datetime
:return: The datetime in ISO 8601 string form.
:rtype: str
"""
datetime_obj = localize_datetime(datetime_obj, pytz.utc)
if datetime_obj < MIN_DATE:
datetime_obj = MIN_DATE
elif datetime_obj > MAX_DATE:
datetime_obj = MAX_DATE
return datetime_obj.replace(tzinfo=None).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z' | 0.003623 |
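A rough usage sketch, assuming the module's `localize_datetime`, `MIN_DATE` and `MAX_DATE` helpers and `pytz` are in scope as the function requires.

```python
import datetime

naive = datetime.datetime(2021, 6, 1, 12, 30, 45, 123456)
print(format_iso_datetime(naive))
# e.g. '2021-06-01T12:30:45.123Z' once the value has been localized to UTC
```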
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst | 0.006329 |
def __click_dropdown_link_text(self, link_text, link_css):
""" When a link may be hidden under a dropdown menu, use this. """
soup = self.get_beautiful_soup()
drop_down_list = soup.select('[class*=dropdown]')
for item in soup.select('[class*=HeaderMenu]'):
drop_down_list.append(item)
for item in soup.select('[class*=menu-item]'):
drop_down_list.append(item)
for item in soup.select('[class*=chevron]'):
drop_down_list.append(item)
csstype = link_css.split('[')[1].split('=')[0]
for item in drop_down_list:
if link_text in item.text.split('\n') and csstype in item.decode():
dropdown_css = ""
for css_class in item['class']:
dropdown_css += '.'
dropdown_css += css_class
dropdown_css = item.name + dropdown_css
matching_dropdowns = self.find_visible_elements(dropdown_css)
for dropdown in matching_dropdowns:
# The same class names might be used for multiple dropdowns
try:
page_actions.hover_element_and_click(
self.driver, dropdown, link_text,
click_by=By.LINK_TEXT, timeout=0.2)
return True
except Exception:
pass
return False | 0.001372 |
def __set_cache(self, tokens):
"""
Sets the tokens cache.
:param tokens: Completer tokens list.
:type tokens: tuple or list
"""
if DefaultCompleter._DefaultCompleter__tokens.get(self.__language):
return
DefaultCompleter._DefaultCompleter__tokens[self.__language] = tokens | 0.005848 |
def image(self, src, title, text):
"""Rendering an image with title and text.
:param src: source link of the image.
:param title: title text of the image.
:param text: alt text of the image.
"""
# rst does not support title option
# and I couldn't find title attribute in HTML standard
return '\n'.join([
'',
'.. image:: {}'.format(src),
' :target: {}'.format(src),
' :alt: {}'.format(text),
'',
]) | 0.003745 |
def posts(self, blogname, type=None, **kwargs):
"""
Gets a list of posts from a particular blog
:param blogname: a string, the blogname you want to look up posts
for. eg: codingjester.tumblr.com
:param id: an int, the id of the post you are looking for on the blog
:param tag: a string, the tag you are looking for on posts
:param limit: an int, the number of results you want
:param offset: an int, the offset of the posts you want to start at.
:param before: an int, the timestamp for posts you want before.
:param filter: the post format you want returned: HTML, text or raw.
:param type: the type of posts you want returned, e.g. video. If omitted returns all post types.
:returns: a dict created from the JSON response
"""
if type is None:
url = '/v2/blog/{}/posts'.format(blogname)
else:
url = '/v2/blog/{}/posts/{}'.format(blogname, type)
return self.send_api_request("get", url, kwargs, ['id', 'tag', 'limit', 'offset', 'before', 'reblog_info', 'notes_info', 'filter', 'api_key'], True) | 0.003433 |
def create(self, body=None, raise_exc=True, headers=None, **kwargs):
'''Performs an HTTP POST to the server, to create a
subordinate resource. Returns a new HALNavigator representing
that resource.
`body` may either be a string or a dictionary representing json
`headers` are additional headers to send in the request
'''
return self._request(POST, body, raise_exc, headers, **kwargs) | 0.004535 |
def delete_key(self, key_to_delete):
"""Deletes the specified key
:param key_to_delete:
:return:
"""
log = logging.getLogger(self.cls_logger + '.delete_key')
log.info('Attempting to delete key: {k}'.format(k=key_to_delete))
try:
self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
except ClientError:
_, ex, trace = sys.exc_info()
log.error('ClientError: Unable to delete key: {k}\n{e}'.format(k=key_to_delete, e=str(ex)))
return False
else:
log.info('Successfully deleted key: {k}'.format(k=key_to_delete))
return True | 0.005839 |
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
assert isinstance(value, unicode)
return value.encode("utf-8") | 0.002825 |
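A sketch of the intended behavior; Python 2 is assumed (the function references `unicode`), and `_UTF8_TYPES` is assumed to be something like `(bytes, type(None))` as in Tornado.

```python
print(utf8(u"caf\u00e9"))       # 'caf\xc3\xa9' - unicode text encoded as UTF-8 bytes
print(utf8(b"already bytes"))   # returned unchanged
print(utf8(None))               # returned unchanged, provided None is in _UTF8_TYPES
```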
def get_device(_id):
"""
Pull a device from the API.
"""
url = DEVICE_URL % _id
arequest = requests.get(url, headers=HEADERS)
status_code = str(arequest.status_code)
if status_code == '401':
_LOGGER.error("Token expired.")
return False
return arequest.json() | 0.00578 |
def parse_line(self, line, lineno):
"""Check a single line for an error. Keeps track of the linenumber"""
# TaskCluster logs are a bit wonky.
#
# TaskCluster logs begin with output coming from TaskCluster itself,
# before it has transitioned control of the task to the configured
# process. These "internal" logs look like the following:
#
# [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b
#
# If an error occurs during this "setup" phase, TaskCluster may emit
# lines beginning with ``[taskcluster:error]``.
#
# Once control has transitioned from TaskCluster to the configured
# task process, lines can be whatever the configured process emits.
# The popular ``run-task`` wrapper prefixes output to emulate
# TaskCluster's "internal" logs. e.g.
#
# [vcs 2016-09-09T17:45:02.842230Z] adding changesets
#
# This prefixing can confuse error parsing. So, we strip it.
#
# Because regular expression matching and string manipulation can be
# expensive when performed on every line, we only strip the TaskCluster
# log prefix if we know we're in a TaskCluster log.
# First line of TaskCluster logs almost certainly has this.
if line.startswith('[taskcluster '):
self.is_taskcluster = True
# For performance reasons, only do this if we have identified as
# a TC task.
if self.is_taskcluster:
line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line)
if self.is_error_line(line):
self.add(line, lineno) | 0.001179 |
def decode(self, source):
"""Decode a source map object into a SourceMapIndex.
The index is keyed on (dst_line, dst_column) for lookups,
and a per row index is kept to help calculate which Token to retrieve.
For example:
A minified source file has two rows and two tokens per row.
# All parsed tokens
tokens = [
Token(dst_row=0, dst_col=0),
Token(dst_row=0, dst_col=5),
Token(dst_row=1, dst_col=0),
Token(dst_row=1, dst_col=12),
]
Two-dimensional array of columns -> row
rows = [
[0, 5],
[0, 12],
]
Token lookup, based on location
index = {
(0, 0): tokens[0],
(0, 5): tokens[1],
(1, 0): tokens[2],
(1, 12): tokens[3],
}
To find the token at (1, 20):
- Check if there's a direct hit on the index (1, 20) => False
- Pull rows[1] => [0, 12]
- bisect_right to find the closest match:
bisect_right([0, 12], 20) => 2
- Fetch the column number before, since we want the column
lte to the bisect_right: 2-1 => row[2-1] => 12
- At this point, we know the token location, (1, 12)
- Pull (1, 12) from index => tokens[3]
"""
# According to spec (https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.h7yy76c5il9v)
# A SourceMap may be prepended with ")]}'" to cause a Javascript error.
# If the file starts with that string, ignore the entire first line.
if source[:4] == ")]}'" or source[:3] == ")]}":
source = source.split('\n', 1)[1]
smap = json.loads(source)
sources = smap['sources']
sourceRoot = smap.get('sourceRoot')
names = list(map(text_type, smap['names']))
mappings = smap['mappings']
lines = mappings.split(';')
if sourceRoot is not None:
sources = list(map(partial(os.path.join, sourceRoot), sources))
# List of all tokens
tokens = []
# line_index is used to identify the closest column when looking up a token
line_index = []
# Main index of all tokens
# The index is keyed on (line, column)
index = {}
dst_col, src_id, src_line, src_col, name_id = 0, 0, 0, 0, 0
for dst_line, line in enumerate(lines):
# Create list for columns in index
line_index.append([])
segments = line.split(',')
dst_col = 0
for segment in segments:
if not segment:
continue
parse = self.parse_vlq(segment)
dst_col += parse[0]
src = None
name = None
if len(parse) > 1:
try:
src_id += parse[1]
if not 0 <= src_id < len(sources):
raise SourceMapDecodeError(
"Segment %s references source %d; there are "
"%d sources" % (segment, src_id, len(sources))
)
src = sources[src_id]
src_line += parse[2]
src_col += parse[3]
if len(parse) > 4:
name_id += parse[4]
if not 0 <= name_id < len(names):
raise SourceMapDecodeError(
"Segment %s references name %d; there are "
"%d names" % (segment, name_id, len(names))
)
name = names[name_id]
except IndexError:
raise SourceMapDecodeError(
"Invalid segment %s, parsed as %r"
% (segment, parse)
)
try:
assert dst_line >= 0, ('dst_line', dst_line)
assert dst_col >= 0, ('dst_col', dst_col)
assert src_line >= 0, ('src_line', src_line)
assert src_col >= 0, ('src_col', src_col)
except AssertionError as e:
raise SourceMapDecodeError(
"Segment %s has negative %s (%d), in file %s"
% (segment, e.args[0][0], e.args[0][1], src)
)
token = Token(dst_line, dst_col, src, src_line, src_col, name)
tokens.append(token)
# Insert into main index
index[(dst_line, dst_col)] = token
# Insert into specific line index
line_index[dst_line].append(dst_col)
return SourceMapIndex(smap, tokens, line_index, index, sources) | 0.000781 |
def copy(self):
"""
Make a copy of the CFG.
:return: A copy of the CFG instance.
:rtype: angr.analyses.CFG
"""
new_cfg = CFGEmulated.__new__(CFGEmulated)
super(CFGEmulated, self).make_copy(new_cfg)
new_cfg._indirect_jump_target_limit = self._indirect_jump_target_limit
new_cfg.named_errors = dict(self.named_errors)
new_cfg.errors = list(self.errors)
new_cfg._fail_fast = self._fail_fast
new_cfg._max_steps = self._max_steps
new_cfg.project = self.project
# Intelligently (or stupidly... you tell me) fill it up
new_cfg._edge_map = self._edge_map.copy()
new_cfg._loop_back_edges = self._loop_back_edges[::]
new_cfg._executable_address_ranges = self._executable_address_ranges[::]
new_cfg._unresolvable_runs = self._unresolvable_runs.copy()
new_cfg._overlapped_loop_headers = self._overlapped_loop_headers[::]
new_cfg._thumb_addrs = self._thumb_addrs.copy()
new_cfg._keep_state = self._keep_state
return new_cfg | 0.002752 |
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret} | 0.000958 |
def _check_if_all_updated(self):
"""Check if all parameters from the TOC has at least been fetched
once"""
for g in self.toc.toc:
if g not in self.values:
return False
for n in self.toc.toc[g]:
if n not in self.values[g]:
return False
return True | 0.005634 |
def writefasta(self, fname):
""" Write sequences to FASTA formatted file"""
f = open(fname, "w")
fa_str = "\n".join([">%s\n%s" % (id, self._format_seq(seq)) for id, seq in self.items()])
f.write(fa_str)
f.close() | 0.011905 |
def flatten_container(self, container):
"""
Accepts a marathon container and pulls out the nested values into the top level
"""
for names in ARG_MAP.values():
if names[TransformationTypes.MARATHON.value]['name'] and \
'.' in names[TransformationTypes.MARATHON.value]['name']:
marathon_dotted_name = names[TransformationTypes.MARATHON.value]['name']
parts = marathon_dotted_name.split('.')
if parts[-2] == 'parameters':
# Special lookup for docker parameters
common_type = names[TransformationTypes.MARATHON.value].get('type')
result = self._lookup_parameter(container, parts[-1], common_type)
if result:
container[marathon_dotted_name] = result
else:
result = lookup_nested_dict(container, *parts)
if result:
container[marathon_dotted_name] = result
return container | 0.006481 |
def split_prefix(key, prefixs):
"""
split key string into prefix and remainder
for first matching prefix from a list
"""
key_upper = key.upper()
for prefix in prefixs:
if key_upper.startswith(prefix):
plen = len(prefix)
return (key_upper[:plen], key[plen:]) | 0.005731 |
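A small usage sketch with hypothetical keys and prefixes; note the prefixes are compared in upper case, and the function implicitly returns `None` when nothing matches.

```python
print(split_prefix('CFLAGS_extra', ['CFLAGS', 'LDFLAGS']))  # ('CFLAGS', '_extra')
print(split_prefix('ldflags-opt', ['CFLAGS', 'LDFLAGS']))   # ('LDFLAGS', '-opt')
print(split_prefix('other', ['CFLAGS', 'LDFLAGS']))         # None
```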
def set_resubscription_params(self, addresses=None, bind_to=None):
"""You can specify a dgram address (udp or unix) on which all of the subscriptions
request will be forwarded to (obviously changing the node address to the router one).
The system could be useful to build 'federated' setup.
* http://uwsgi.readthedocs.io/en/latest/Changelog-2.0.1.html#resubscriptions
:param str|unicode|list[str|unicode] addresses: Forward subscriptions to the specified subscription server.
:param str|unicode|list[str|unicode] bind_to: Bind to the specified address when re-subscribing.
"""
self._set_aliased('resubscribe', addresses, multi=True)
self._set_aliased('resubscribe-bind', bind_to)
return self | 0.009009 |
def get_prefixes(dirname: str, extension: str) -> List[str]:
""" Returns a list of prefixes to files in the directory (which might be a whole
corpus, or a train/valid/test subset. The prefixes include the path leading
up to it, but only the filename up until the first observed period '.'
"""
prefixes = []
for root, _, filenames in os.walk(dirname):
for filename in filenames:
if filename.endswith(extension):
# Then it's an input feature file and its prefix will
# correspond to a training example
prefixes.append(os.path.join(root, filename.split(".")[0]))
return sorted(prefixes) | 0.002941 |
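For example, given a hypothetical corpus directory laid out as in the comment below, the call collects one prefix per feature file (assuming the module imports `os` and `List` as the function requires):

```python
# corpus/
#   train/utt001.feat.npy
#   train/utt002.feat.npy
#   valid/utt003.feat.npy
prefixes = get_prefixes("corpus", ".feat.npy")
print(prefixes)
# ['corpus/train/utt001', 'corpus/train/utt002', 'corpus/valid/utt003']
```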
def _refresh(self):
"""
Remove all existing results files and reinit the h5 arrays
so that the tetrad object is just like fresh from a CLI start.
"""
## clear any existing results files
oldfiles = [self.files.qdump] + \
self.database.__dict__.values() + \
self.trees.__dict__.values()
for oldfile in oldfiles:
if oldfile:
if os.path.exists(oldfile):
os.remove(oldfile)
## store old ipcluster info
oldcluster = copy.deepcopy(self._ipcluster)
## reinit the tetrad object data.
self.__init__(
name=self.name,
data=self.files.data,
mapfile=self.files.mapfile,
workdir=self.dirs,
method=self.params.method,
guidetree=self.files.tree,
resolve_ambigs=self.params.resolve_ambigs,
save_invariants=self.params.save_invariants,
nboots=self.params.nboots,
nquartets=self.params.nquartets,
initarr=True,
quiet=True,
cli=self.kwargs.get("cli")
)
## retain the same ipcluster info
self._ipcluster = oldcluster | 0.011858 |
def CloseHandle(self):
'''Releases a handle acquired with VMGuestLib_OpenHandle'''
if hasattr(self, 'handle'):
ret = vmGuestLib.VMGuestLib_CloseHandle(self.handle.value)
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
del(self.handle) | 0.009836 |
def main():
""" Parses the command-line args, and calls run. """
parser = argparse.ArgumentParser(
description='A pipeline that generates analysis pipelines.')
parser.add_argument('input', nargs='?',
help='A valid metapipe configuration file.')
parser.add_argument('-o', '--output',
help='An output destination. If none is provided, the '
'results will be printed to stdout.',
default=sys.stdout)
parser.add_argument('-t', '--temp',
help='A desired metapipe binary file. This is used to store '
'temp data between generation and execution. '
'(Default: "%(default)s")', default='.metapipe')
parser.add_argument('-s', '--shell',
help='The path to the shell to be used when executing the '
'pipeline. (Default: "%(default)s)"',
default='/bin/bash')
parser.add_argument('-r', '--run',
help='Run the pipeline as soon as it\'s ready.',
action='store_true')
parser.add_argument('-n', '--name',
help='A name for the pipeline.',
default='')
parser.add_argument('-j', '--job-type',
help='The destination for calculations (e.g. local, a PBS '
'queue on a cluster, etc).\nOptions: {}. '
'(Default: "%(default)s)"'.format(JOB_TYPES.keys()),
default='local')
parser.add_argument('-p', '--max-jobs',
help='The maximum number of concurrent jobs allowed. '
'Defaults to maximum available cores.',
default=None)
parser.add_argument('--report-type',
help='The output report type. By default metapipe will '
'print updates to the console. \nOptions: {}. '
'(Default: "%(default)s)"'.format(QUEUE_TYPES.keys()),
default='text')
parser.add_argument('-v','--version',
help='Displays the current version of the application.',
action='store_true')
args = parser.parse_args()
if args.version:
print('Version: {}'.format(__version__))
sys.exit(0)
try:
with open(args.input) as f:
config = f.read()
except IOError:
print('No valid config file found.')
return -1
run(config, args.max_jobs, args.output, args.job_type, args.report_type,
args.shell, args.temp, args.run) | 0.011565 |
def adjust_jobs_priority(self, high_value_jobs, priority=1):
"""For every job priority determine if we need to increase or decrease the job priority
Currently, high value jobs have a priority of 1 and a timeout of 0.
"""
# Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100
# for jobs update via load_preseed) are updated
for jp in JobPriority.objects.filter(expiration_date__isnull=True):
if jp.unique_identifier() not in high_value_jobs:
if jp.priority != SETA_LOW_VALUE_PRIORITY:
logger.warning('Decreasing priority of %s', jp.unique_identifier())
jp.priority = SETA_LOW_VALUE_PRIORITY
jp.save(update_fields=['priority'])
elif jp.priority != priority:
logger.warning('Increasing priority of %s', jp.unique_identifier())
jp.priority = priority
jp.save(update_fields=['priority']) | 0.0059 |
def search_tree(self, name): # noqa: D302
r"""
Search tree for all nodes with a specific name.
:param name: Node name to search for
:type name: :ref:`NodeName`
:raises: RuntimeError (Argument \`name\` is not valid)
For example:
>>> from __future__ import print_function
>>> import pprint, ptrie
>>> tobj = ptrie.Trie('/')
>>> tobj.add_nodes([
... {'name':'root', 'data':[]},
... {'name':'root/anode', 'data':7},
... {'name':'root/bnode', 'data':[]},
... {'name':'root/cnode', 'data':[]},
... {'name':'root/bnode/anode', 'data':['a', 'b', 'c']},
... {'name':'root/cnode/anode/leaf', 'data':True}
... ])
>>> print(tobj)
root
├anode (*)
├bnode
│└anode (*)
└cnode
└anode
└leaf (*)
>>> pprint.pprint(tobj.search_tree('anode'), width=40)
['root/anode',
'root/bnode/anode',
'root/cnode/anode',
'root/cnode/anode/leaf']
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
return self._search_tree(name) | 0.001493 |
def load_experiment(folder, return_path=False):
'''load_experiment:
reads in the config.json for a folder, returns None if not found.
:param folder: full path to experiment folder
:param return_path: if True, don't load the config.json, but return it
'''
fullpath = os.path.abspath(folder)
config = "%s/config.json" %(fullpath)
if not os.path.exists(config):
bot.error("config.json could not be found in %s" %(folder))
config = None
if return_path is False and config is not None:
config = read_json(config)
return config | 0.005128 |
def region_selection(request):
'''Handle submission of the region selection form in the base template. '''
form = get_region_select_form(request.GET)
abbr = form.data.get('abbr')
if not abbr or len(abbr) != 2:
return redirect('homepage')
return redirect('region', abbr=abbr) | 0.003311 |
def hardware_version(self):
"""Returns the hardware version of the connected J-Link as a
major.minor string.
Args:
self (JLink): the ``JLink`` instance
Returns:
Hardware version string.
"""
version = self._dll.JLINKARM_GetHardwareVersion()
        major = version // 10000 % 100
        minor = version // 100 % 100
return '%d.%02d' % (major, minor) | 0.004706 |
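A small standalone sketch of how the raw integer returned by JLINKARM_GetHardwareVersion decodes into the major.minor string; 62000 is a made-up value, and floor division is used here to keep the arithmetic purely integral:
version = 62000                     # hypothetical raw value from the DLL
major = version // 10000 % 100      # -> 6
minor = version // 100 % 100        # -> 20
print('%d.%02d' % (major, minor))   # -> '6.20'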
def reload(self):
'Generate histrow for each row and then reverse-sort by length.'
self.rows = []
# if len(self.origCols) == 1 and self.origCols[0].type in (int, float, currency):
# self.numericBinning()
# else:
self.discreteBinning()
# automatically add cache to all columns now that everything is binned
for c in self.nonKeyVisibleCols:
c._cachedValues = collections.OrderedDict() | 0.006508 |
def insertAdjacentHTML(self, position: str, html: str) -> None:
"""Parse ``html`` to DOM and insert to ``position``.
        ``position`` is a case-insensitive string, and must be one of
"beforeBegin", "afterBegin", "beforeEnd", or "afterEnd".
"""
df = self._parse_html(html)
pos = position.lower()
if pos == 'beforebegin':
self.before(df)
elif pos == 'afterbegin':
self.prepend(df)
elif pos == 'beforeend':
self.append(df)
elif pos == 'afterend':
self.after(df)
else:
raise ValueError(
'The value provided ({}) is not one of "beforeBegin", '
'"afterBegin", "beforeEnd", or "afterEnd".'.format(position)
) | 0.002538 |
def get_bins(values):
"""
Automatically compute the number of bins for discrete variables.
Parameters
----------
values = numpy array
values
Returns
-------
array with the bins
Notes
-----
    Computes the width of the bins by taking the maximum of the Sturges and the Freedman-Diaconis
    estimators. According to numpy `np.histogram` this provides good all-around performance.
    The Sturges estimator is a very simplistic one based on the assumption of normality of the data.
    This estimator has poor performance for non-normal data, which becomes especially obvious for
    large data sets. The estimate depends only on the size of the data.
    The Freedman-Diaconis rule uses the interquartile range (IQR) to estimate the binwidth.
    It is considered a robust version of the Scott rule, as the IQR is less affected by outliers
    than the standard deviation. However, the IQR depends on fewer points than the standard
    deviation, so it is less accurate, especially for long-tailed distributions.
"""
x_min = values.min().astype(int)
x_max = values.max().astype(int)
# Sturges histogram bin estimator
bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)
# The Freedman-Diaconis histogram bin estimator.
iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
bins_fd = 2 * iqr * values.size ** (-1 / 3)
width = round(np.max([1, bins_sturges, bins_fd])).astype(int)
return np.arange(x_min, x_max + width + 1, width) | 0.006402 |
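A minimal usage sketch, assuming numpy is imported as np and the get_bins function above is in scope; the estimator values in the comments were worked out by hand for this particular input:
import numpy as np
values = np.arange(100)
# Sturges: (99 - 0) / (log2(100) + 1) ~= 12.95
# Freedman-Diaconis: 2 * IQR * n**(-1/3) = 2 * 49.5 * 100**(-1/3) ~= 21.33
# width = round(max(1, 12.95, 21.33)) = 21
print(get_bins(values))  # -> [  0  21  42  63  84 105]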
def get_address_overview(address, coin_symbol='btc', api_key=None):
'''
    Takes an address and coin_symbol and returns the address details
'''
assert is_valid_address_for_coinsymbol(b58_address=address,
coin_symbol=coin_symbol)
url = make_url(coin_symbol, 'addrs', **{address: 'balance'})
params = {}
if api_key:
params['token'] = api_key
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r) | 0.006024 |
def cleanup_temp_filesystem(self):
"""
Cleanup the temporary directory
"""
logger.debug("Deleting compressed file extraction directory: " + self.tmp_dir)
shutil.rmtree(self.tmp_dir, True) | 0.013216 |
def _get_description(prev_description):
"""Get the parsed description file (a dictionary) from another
parsed description file."""
current_desc_file = os.path.join(utils.get_project_root(),
prev_description['data-source'],
"info.yml")
if not os.path.isfile(current_desc_file):
logging.error("You are probably not in the folder of a model, because "
"%s is not a file.", current_desc_file)
sys.exit(-1)
with open(current_desc_file, 'r') as ymlfile:
current_description = yaml.load(ymlfile)
return current_description | 0.001511 |
def _get_server(self):
"""
Get server to use for request.
Also process inactive server list, re-add them after given interval.
"""
with self._lock:
inactive_server_count = len(self._inactive_servers)
for i in range(inactive_server_count):
try:
ts, server, message = heapq.heappop(self._inactive_servers)
except IndexError:
pass
else:
if (ts + self.retry_interval) > time():
# Not yet, put it back
heapq.heappush(self._inactive_servers,
(ts, server, message))
else:
self._active_servers.append(server)
logger.warn("Restored server %s into active pool",
server)
# if none is old enough, use oldest
if not self._active_servers:
ts, server, message = heapq.heappop(self._inactive_servers)
self._active_servers.append(server)
logger.info("Restored server %s into active pool", server)
server = self._active_servers[0]
self._roundrobin()
return server | 0.001516 |
def audit_customer_subscription(customer, unknown=True):
"""
Audits the provided customer's subscription against stripe and returns a pair
that contains a boolean and a result type.
Default result types can be found in zebra.conf.defaults and can be
overridden in your project's settings.
"""
if (hasattr(customer, 'suspended') and customer.suspended):
result = AUDIT_RESULTS['suspended']
else:
if hasattr(customer, 'subscription'):
try:
result = AUDIT_RESULTS[customer.subscription.status]
            except KeyError as err:
                # TODO should this be a more specific exception class?
                raise Exception("Unable to locate a result set for "
                                "subscription status %s in ZEBRA_AUDIT_RESULTS" % str(err))
else:
result = AUDIT_RESULTS['no_subscription']
return result | 0.002257 |
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
"""
cd_id_m = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) | 0.007257 |
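A hedged usage sketch: B58 is assumed here to be the base58 alphabet used by the original module, and the DID below is an arbitrary 22-character base58 string chosen purely for illustration:
import re
B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'  # assumed base58 alphabet
did = 'WgWxqztrNooG92RXvxSTWv'                                      # illustrative issuer DID
print(ok_cred_def_id('{}:3:CL:20:tag'.format(did)))               # True (protocol >= 1.4 form)
print(ok_cred_def_id('{}:3:CL:20'.format(did), issuer_did=did))   # True (protocol == 1.3 form)
print(ok_cred_def_id('not-a-cred-def-id'))                        # False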
def _show_help(self, txt,
mode_to_set=MAIN_HELP_MODE,
caption=' Help ',
prompt=' Press any key to hide ',
too_small_msg='Window too small to show message',
is_message=False):
""" Display a help, info or question window. """
self.helpWinContainer = None
self.helpWin = None
self.operation_mode = mode_to_set
txt_col = curses.color_pair(5)
box_col = curses.color_pair(3)
caption_col = curses.color_pair(4)
lines = txt.split('\n')
st_lines = [item.replace('\r','') for item in lines]
lines = [item.strip() for item in st_lines]
inner_height = len(lines) + 2
inner_width = self._get_message_width_from_list(lines) + 4
outer_height = inner_height + 2
outer_width = inner_width + 2
if self.window_mode == CONFIG_MODE and \
self.operation_mode > CONFIG_HELP_MODE:
use_empty_win = True
height_to_use = outer_height
width_to_use = outer_width
else:
use_empty_win = False
height_to_use = inner_height
width_to_use = inner_width
if self.maxY - 2 < outer_height or self.maxX < outer_width:
txt = too_small_msg
inner_height = 3
inner_width = len(txt) + 4
if use_empty_win:
height_to_use = inner_height +2
width_to_use = inner_width + 2
else:
height_to_use = inner_height
width_to_use = inner_width
if self.maxX < width_to_use:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(' *** Window too small even to show help warning ***')
self.operation_mode = self.window_mode = NORMAL_MODE
return
lines = [ txt , ]
if use_empty_win:
self.helpWinContainer = curses.newwin(height_to_use,width_to_use,int((self.maxY-height_to_use)/2),int((self.maxX-width_to_use)/2))
self.helpWinContainer.bkgdset(' ', box_col)
self.helpWinContainer.erase()
self.helpWin = curses.newwin(inner_height,inner_width,int((self.maxY-inner_height)/2),int((self.maxX-inner_width)/2))
self.helpWin.bkgdset(' ', box_col)
self.helpWin.erase()
self.helpWin.box()
if is_message:
start_with = txt_col
follow = caption_col
else:
start_with = caption_col
follow = txt_col
if caption.strip():
self.helpWin.addstr(0, int((inner_width-len(caption))/2), caption, caption_col)
splited = []
for i, n in enumerate(lines):
a_line = self._replace_starting_undesscore(n)
if a_line.startswith('%'):
self.helpWin.move(i + 1, 0)
try:
self.helpWin.addstr('├', curses.color_pair(3))
self.helpWin.addstr('─' * (inner_width - 2), curses.color_pair(3))
self.helpWin.addstr('┤', curses.color_pair(3))
except:
self.helpWin.addstr('├'.encode('utf-8'), curses.color_pair(3))
self.helpWin.addstr('─'.encode('utf-8') * (inner_width - 2), curses.color_pair(3))
self.helpWin.addstr('┤'.encode('utf-8'), curses.color_pair(3))
self.helpWin.addstr(i + 1, inner_width-len(a_line[1:]) - 1, a_line[1:].replace('_', ' '), caption_col)
#self.helpWin.addstr(i + 1, int((inner_width-len(a_line[1:]))/2), a_line[1:].replace('_', ' '), caption_col)
else:
splited = a_line.split('|')
self.helpWin.move(i + 1, 2)
for part, part_string in enumerate(splited):
if part_string.strip():
if part == 0 or part % 2 == 0:
self.helpWin.addstr(splited[part], start_with)
else:
self.helpWin.addstr(splited[part], follow)
if prompt.strip():
self.helpWin.addstr(inner_height - 1, int(inner_width-len(prompt)-1), prompt)
if use_empty_win:
self.helpWinContainer.refresh()
self.helpWin.refresh() | 0.006909 |
def rate_of_turn(speed, bank):
'''return expected rate of turn in degrees/s for given speed in m/s and
bank angle in degrees'''
if abs(speed) < 2 or abs(bank) > 80:
return 0
ret = degrees(9.81*tan(radians(bank))/speed)
return ret | 0.003846 |
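A quick worked example, assuming degrees, radians and tan are imported from math as in the original module; a coordinated 30-degree bank at 25 m/s gives degrees(9.81 * tan(30 deg) / 25), roughly 12.98 degrees per second:
from math import degrees, radians, tan
print(round(rate_of_turn(25.0, 30.0), 2))  # -> 12.98 degrees/s
print(rate_of_turn(1.0, 30.0))             # -> 0 (speed below the 2 m/s cutoff)
print(rate_of_turn(25.0, 85.0))            # -> 0 (bank angle above the 80-degree cutoff)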
def pop_parameter(key):
'''Remove and get parameter by key.
Args:
key(str): Key of parameter.
Returns: ~nnabla.Variable
Parameter if key found, otherwise None.
'''
names = key.split('/')
if len(names) > 1:
with parameter_scope(names[0]):
return pop_parameter('/'.join(names[1:]))
global current_scope
param = current_scope.get(key, None)
if param is not None:
del current_scope[key]
return param | 0.002075 |
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT):
"""This method initiates a tftp upload to the configured remote host,
uploading the filename passed. It reads the file from input, which
can be a file-like object or a path to a local file. If a packethook
is provided, it must be a function that takes a single parameter,
which will be a copy of each DAT packet sent in the form of a
TftpPacketDAT object. The timeout parameter may be used to override
the default SOCK_TIMEOUT setting, which is the amount of time that
the client will wait for a DAT packet to be ACKd by the server.
Note: If input is a hyphen, stdin is used."""
self.context = TftpContextClientUpload(self.host,
self.iport,
filename,
input,
self.options,
packethook,
timeout,
localip = self.localip)
self.context.start()
# Upload happens here
self.context.end()
metrics = self.context.metrics
log.info('')
log.info("Upload complete.")
if metrics.duration == 0:
log.info("Duration too short, rate undetermined")
else:
log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
log.info("Average rate: %.2f kbps" % metrics.kbps)
log.info("%.2f bytes in resent data" % metrics.resent_bytes)
log.info("Resent %d packets" % metrics.dupcount) | 0.002812 |
def fetch_rrlyrae_lc_params(**kwargs):
"""Fetch data from table 2 of Sesar 2010
This table includes observationally-derived parameters for all the
Sesar 2010 lightcurves.
"""
save_loc = _get_download_or_cache('table2.dat.gz', **kwargs)
dtype = [('id', 'i'), ('type', 'S2'), ('P', 'f'),
('uA', 'f'), ('u0', 'f'), ('uE', 'f'), ('uT', 'f'),
('gA', 'f'), ('g0', 'f'), ('gE', 'f'), ('gT', 'f'),
('rA', 'f'), ('r0', 'f'), ('rE', 'f'), ('rT', 'f'),
('iA', 'f'), ('i0', 'f'), ('iE', 'f'), ('iT', 'f'),
('zA', 'f'), ('z0', 'f'), ('zE', 'f'), ('zT', 'f')]
return np.loadtxt(save_loc, dtype=dtype) | 0.002928 |
def execute_reliabledictionary(client, application_name, service_name, input_file):
"""Execute create, update, delete operations on existing reliable dictionaries.
    Carry out create, update and delete operations on existing reliable dictionaries for the given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
    :param input_file: Input file with a list of JSON objects describing the operations to perform on the reliable dictionaries.
"""
cluster = Cluster.from_sfclient(client)
service = cluster.get_application(application_name).get_service(service_name)
# call get service with headers and params
with open(input_file) as json_file:
json_data = json.load(json_file)
service.execute(json_data)
return | 0.006849 |
def notify_client(
notifier_uri,
client_id,
status_code,
message=None):
"""
Notify the client of the result of handling a request
The payload contains two elements:
- client_id
- result
The *client_id* is the id of the client to notify. It is assumed
that the notifier service is able to identify the client by this id
and that it can pass the *result* to it.
The *result* always contains a *status_code* element. In case the
message passed in is not None, it will also contain a *message*
element.
In case the notifier service does not exist or returns an error,
an error message will be logged to *stderr*.
"""
payload = {
"client_id": client_id,
"result": {
"response": {
"status_code": status_code
}
}
}
if message is not None:
payload["result"]["response"]["message"] = message
response = requests.post(notifier_uri, json=payload)
if response.status_code != 201:
sys.stderr.write("failed to notify client: {}\n".format(payload))
sys.stderr.flush() | 0.000865 |
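For illustration only, this is the payload shape the function above would POST for a failed lookup; the client id, status code and message are made-up values, and no request is actually sent here:
import json
payload = {
    "client_id": "abc123",                    # hypothetical client id
    "result": {
        "response": {
            "status_code": 404,
            "message": "resource not found",  # omitted entirely when message is None
        }
    },
}
print(json.dumps(payload, indent=2))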
def _calendar_json_for_occurrence(self, occurrence):
"""
Return JSON for a single Occurrence
"""
# Slugify the plugin's verbose name for use as a class name.
if occurrence.is_all_day:
start = occurrence.start
# `end` is exclusive according to the doc in
# http://fullcalendar.io/docs/event_data/Event_Object/, so
# we need to add 1 day to ``end`` to have the end date
# included in the calendar.
end = occurrence.start + timedelta(days=1)
else:
start = djtz.localize(occurrence.start)
end = djtz.localize(occurrence.end)
if occurrence.is_cancelled and occurrence.cancel_reason:
title = u"{0} [{1}]".format(
occurrence.event.title, occurrence.cancel_reason)
else:
title = occurrence.event.title
if occurrence.event.primary_type:
color = occurrence.event.primary_type.color
else:
color = "#cccccc"
return {
'title': title,
'allDay': occurrence.is_all_day or occurrence.event.contained_events.exists(),
'start': start,
'end': end,
'url': reverse('admin:icekit_events_eventbase_change',
args=[occurrence.event.pk]),
'className': self._calendar_classes_for_occurrence(occurrence),
'backgroundColor': color,
} | 0.002039 |
def multiplicative_jitter(x, epsilon=1e-2):
"""Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x.
"""
if epsilon == 0:
return x
return x * mtf.random_uniform(
x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype) | 0.007722 |
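The function above targets Mesh TensorFlow; as a rough numpy analogue of the same idea (not the mtf implementation), each element is scaled by an independent factor drawn from [1 - epsilon, 1 + epsilon]:
import numpy as np
def multiplicative_jitter_np(x, epsilon=1e-2, rng=None):
    # numpy sketch of the same idea; the generator is seeded only for reproducibility
    if epsilon == 0:
        return x
    rng = rng or np.random.default_rng(0)
    return x * rng.uniform(1.0 - epsilon, 1.0 + epsilon, size=x.shape)
x = np.ones(4, dtype=np.float32)
print(multiplicative_jitter_np(x))  # values close to 1.0, each within +/- 1%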
def name(self):
""" Return a basic meaningful name based on device type """
if (
self.device_type and
self.device_type.code in (DeviceType.MOBILE, DeviceType.TABLET)
):
return self.device
else:
return self.browser | 0.006826 |
def _fromTwosComplement(x, bits=16):
"""Calculate the inverse(?) of a two's complement of an integer.
Args:
* x (int): input integer.
* bits (int): number of bits, must be > 0.
Returns:
An int, that represents the inverse(?) of two's complement of the input.
Example for bits=8:
=== =======
x returns
=== =======
0 0
1 1
127 127
128 -128
129 -127
255 -1
=== =======
"""
_checkInt(bits, minvalue=0, description='number of bits')
_checkInt(x, description='input')
upperlimit = 2 ** (bits) - 1
lowerlimit = 0
if x > upperlimit or x < lowerlimit:
raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \
.format(x, lowerlimit, upperlimit, bits))
    # Calculate the inverse(?) of the two's complement
limit = 2 ** (bits - 1) - 1
if x <= limit:
return x
return x - 2 ** bits | 0.005061 |
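The function above relies on the module's _checkInt validation helper; stripped of that validation, the core sign-extension arithmetic can be sketched on its own and reproduces the table in the docstring:
def twos_complement_to_int(x, bits=16):
    # reinterpret an unsigned value as a signed two's-complement integer
    limit = 2 ** (bits - 1) - 1
    return x if x <= limit else x - 2 ** bits
for x in (0, 1, 127, 128, 129, 255):
    print(x, '->', twos_complement_to_int(x, bits=8))
# 0 -> 0, 1 -> 1, 127 -> 127, 128 -> -128, 129 -> -127, 255 -> -1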
def appliance_device_snmp_v3_users(self):
"""
Gets the ApplianceDeviceSNMPv3Users API client.
Returns:
ApplianceDeviceSNMPv3Users:
"""
if not self.__appliance_device_snmp_v3_users:
self.__appliance_device_snmp_v3_users = ApplianceDeviceSNMPv3Users(self.__connection)
return self.__appliance_device_snmp_v3_users | 0.007813 |
def filterchain_all(request, app, model, field, foreign_key_app_name,
foreign_key_model_name, foreign_key_field_name, value):
"""Returns filtered results followed by excluded results below."""
model_class = get_model(app, model)
keywords = get_keywords(field, value)
# SECURITY: Make sure all smart selects requests are opt-in
foreign_model_class = get_model(foreign_key_app_name, foreign_key_model_name)
if not any([(isinstance(f, ChainedManyToManyField) or
isinstance(f, ChainedForeignKey))
for f in foreign_model_class._meta.get_fields()]):
raise PermissionDenied("Smart select disallowed")
# filter queryset using limit_choices_to
limit_choices_to = get_limit_choices_to(foreign_key_app_name, foreign_key_model_name, foreign_key_field_name)
queryset = get_queryset(model_class, limit_choices_to=limit_choices_to)
filtered = list(do_filter(queryset, keywords))
# Sort results if model doesn't include a default ordering.
if not getattr(model_class._meta, 'ordering', False):
sort_results(list(filtered))
excluded = list(do_filter(queryset, keywords, exclude=True))
# Sort results if model doesn't include a default ordering.
if not getattr(model_class._meta, 'ordering', False):
sort_results(list(excluded))
# Empty choice to separate filtered and excluded results.
empty_choice = {'value': "", 'display': "---------"}
serialized_results = (
serialize_results(filtered) +
[empty_choice] +
serialize_results(excluded)
)
return JsonResponse(serialized_results, safe=False) | 0.001806 |
def get_build_container_dir(self, arch):
'''Given the arch name, returns the directory where it will be
built.
This returns a different directory depending on what
alternative or optional dependencies are being built.
'''
dir_name = self.get_dir_name()
return join(self.ctx.build_dir, 'other_builds',
dir_name, '{}__ndk_target_{}'.format(arch, self.ctx.ndk_api)) | 0.006834 |
def get_query_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> list:
"""
Method to parse `query_string` using `urllib.parse.parse_qsl`.
        This method is used by the `query_args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: list
"""
if not self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
]:
if self.query_string:
self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = parse_qsl(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
return self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
] | 0.001425 |
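A short illustration of the urllib.parse.parse_qsl behaviour that the method wraps, in particular how keep_blank_values changes the result; the query string is arbitrary:
from urllib.parse import parse_qsl
qs = "a=1&b=&a=2"
print(parse_qsl(qs))                          # [('a', '1'), ('a', '2')] -- blank 'b' dropped
print(parse_qsl(qs, keep_blank_values=True))  # [('a', '1'), ('b', ''), ('a', '2')]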
def _generate_cpu_stats():
"""Read and display processor name """
cpu_name = urwid.Text("CPU Name N/A", align="center")
try:
cpu_name = urwid.Text(get_processor_name().strip(), align="center")
except OSError:
logging.info("CPU name not available")
return [urwid.Text(('bold text', "CPU Detected"),
align="center"), cpu_name, urwid.Divider()] | 0.00463 |
def _get_samples(self, samples):
"""
        Internal function. Prelude for each step() that reads in a possibly
        empty list of sample names and returns a list of sample objects."""
## if samples not entered use all samples
if not samples:
samples = self.samples.keys()
## Be nice and allow user to pass in only one sample as a string,
## rather than a one element list. When you make the string into a list
## you have to wrap it in square braces or else list makes a list of
## each character individually.
if isinstance(samples, str):
samples = list([samples])
## if sample keys, replace with sample obj
assert isinstance(samples, list), \
"to subselect samples enter as a list, e.g., [A, B]."
newsamples = [self.samples.get(key) for key in samples \
if self.samples.get(key)]
strnewsamples = [i.name for i in newsamples]
## are there any samples that did not make it into the dict?
badsamples = set(samples).difference(set(strnewsamples))
if badsamples:
outstring = ", ".join(badsamples)
raise IPyradError(\
"Unrecognized Sample name(s) not linked to {}: {}"\
.format(self.name, outstring))
## require Samples
assert newsamples, \
"No Samples passed in and none in assembly {}".format(self.name)
return newsamples | 0.009873 |
def bust_self(self, obj):
"""Remove the value that is being stored on `obj` for this
:class:`.cached_property`
object.
:param obj: The instance on which to bust the cache.
"""
if self.func.__name__ in obj.__dict__:
delattr(obj, self.func.__name__) | 0.006494 |
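A minimal, self-contained sketch (not the library's full cached_property implementation) showing how a bust_self-style method clears the value cached on the instance:
class cached_property:
    # stripped-down non-data descriptor: caches the computed value in the instance __dict__
    def __init__(self, func):
        self.func = func
    def __get__(self, obj, cls=None):
        if obj is None:
            return self
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value
    def bust_self(self, obj):
        if self.func.__name__ in obj.__dict__:
            delattr(obj, self.func.__name__)
class Report:
    @cached_property
    def total(self):
        print("computing...")
        return 42
r = Report()
r.total                    # prints "computing...", caches 42 on r
r.total                    # served from r.__dict__, nothing printed
Report.total.bust_self(r)  # drops the cached value
r.total                    # prints "computing..." again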
def report_error_event(self, error_report):
"""Uses the gapic client to report the error.
:type error_report: dict
:param error_report:
payload of the error report formatted according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
            This object should be built using
            :meth:`~google.cloud.error_reporting.client._build_error_report`.
"""
project_name = self._gapic_api.project_path(self._project)
error_report_payload = report_errors_service_pb2.ReportedErrorEvent()
ParseDict(error_report, error_report_payload)
self._gapic_api.report_error_event(project_name, error_report_payload) | 0.002717 |
def find_ip4_by_id(self, id_ip):
"""
Get an IP by ID
:param id_ip: IP identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{ ips { id: <id_ip4>,
oct1: <oct1>,
oct2: <oct2>,
oct3: <oct3>,
oct4: <oct4>,
equipamento: [ {all equipamentos related} ] ,
descricao: <descricao>} }
        :raise IpNotAvailableError: Network does not have an available IP.
        :raise NetworkIPv4NotFoundError: Network was not found.
        :raise UserNotAuthorizedError: User does not have permission to perform the operation.
:raise InvalidParameterError: Ip identifier is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
if not is_valid_int_param(id_ip):
raise InvalidParameterError(
u'Ip identifier is invalid or was not informed.')
url = 'ip/get/' + str(id_ip) + "/"
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) | 0.002532 |
def find_version():
"""Only define version in one place"""
version_file = read_file('__init__.py')
version_match = re.search(r'^__version__ = ["\']([^"\']*)["\']',
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.') | 0.002924 |
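The read_file helper is not shown above, but the regular expression itself can be exercised directly against a representative __init__.py line:
import re
sample = '__version__ = "1.2.3"\n'
match = re.search(r'^__version__ = ["\']([^"\']*)["\']', sample, re.M)
print(match.group(1))  # -> 1.2.3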
def _get_object_from_python_path(python_path):
"""Method that will fetch a Marshmallow schema from a path to it.
Args:
python_path (str): The string path to the Marshmallow schema.
Returns:
marshmallow.Schema: The schema matching the provided path.
Raises:
TypeError: This is raised if the specified object isn't
a Marshmallow schema.
"""
# Dissect the path
python_path = python_path.split('.')
module_path = python_path[:-1]
object_class = python_path[-1]
if isinstance(module_path, list):
module_path = '.'.join(module_path)
# Grab the object
module = import_module(module_path)
schema = getattr(module, object_class)
if isclass(schema):
schema = schema()
return schema | 0.002288 |
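A hedged usage sketch, assuming import_module and isclass are already imported as in the original module; a standard-library class stands in for a Marshmallow schema purely to show the path resolution and instantiation:
obj = _get_object_from_python_path('collections.OrderedDict')
print(type(obj).__name__)  # -> OrderedDict (the class was found, then instantiated)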
def _ParseToken(self, file_object, file_offset):
"""Parses a token.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
tuple: containing:
int: token type
object: token data or None if the token type is not supported.
"""
token_type = self._ParseTokenType(file_object, file_offset)
token_data = None
token_data_map_name = self._DATA_TYPE_MAP_PER_TOKEN_TYPE.get(
token_type, None)
if token_data_map_name:
token_data_map = self._GetDataTypeMap(token_data_map_name)
token_data, _ = self._ReadStructureFromFileObject(
file_object, file_offset + 1, token_data_map)
return token_type, token_data | 0.003755 |