text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def report_role(self, role):
"""
Return the fields gathered.
"""
self.yaml_files = []
fields = {
"state": "skipped",
"total_files": self.gather_files(),
"total_lines": self.gather_lines(),
"total_facts": self.gather_facts(),
"total_defaults": self.gather_defaults(),
"facts": self.facts,
"defaults": self.defaults,
"meta": self.gather_meta(),
"readme": self.gather_readme(),
"dependencies": self.dependencies,
"total_dependencies": len(self.dependencies)
}
return fields | 0.00303 |
def min(self):
"""
Returns the minimum value of the domain.
:rtype: `int` or `np.inf`
"""
return int(self._min) if not np.isinf(self._min) else self._min | 0.010204 |
def execute_update(args):
"""Execute the update based on command line args and returns a dictionary
with 'execution result, ''response code', 'response info' and
'process friendly message'.
"""
provider_class = getattr(dnsupdater,
dnsupdater.AVAILABLE_PLUGINS.get(args.provider))
updater_options = {}
process_message = None
auth = None
if args.store: # --store argument
if provider_class.auth_type == 'T':
user_arg = args.usertoken or utils.read_input(
"Paste your auth token: ")
auth = authinfo.ApiAuth(usertoken=user_arg)
else:
user_arg = args.usertoken or utils.read_input(
"Type your username: ")
pass_arg = args.password or getpass.getpass("Type your password: ")
auth = authinfo.ApiAuth(user_arg, pass_arg)
authinfo.store(auth, args.provider, args.config)
exec_result = EXECUTION_RESULT_OK
if not args.hostname:
update_ddns = False
process_message = "Auth info stored."
else:
update_ddns = True
# auth info provided via arguments
elif args.usertoken and args.hostname:
if provider_class.auth_type == 'T':
auth = authinfo.ApiAuth(args.usertoken)
else:
auth = authinfo.ApiAuth(args.usertoken, args.password)
update_ddns = True
exec_result = EXECUTION_RESULT_OK
elif args.hostname:
if authinfo.exists(args.provider, args.config):
auth = authinfo.load(args.provider, args.config)
update_ddns = True
exec_result = EXECUTION_RESULT_OK
else:
update_ddns = False
exec_result = EXECUTION_RESULT_NOK
process_message = "No stored auth information found for " \
"provider: '%s'" % args.provider
else: # no arguments
update_ddns = False
exec_result = EXECUTION_RESULT_NOK
process_message = "Warning: The hostname to be updated must be " \
"provided.\nUsertoken and password can be either " \
"provided via command line or stored with --store " \
"option.\nExecute noipy --help for more details."
if update_ddns and args.provider == 'generic':
if args.url:
if not URL_RE.match(args.url):
process_message = "Malformed URL."
exec_result = EXECUTION_RESULT_NOK
update_ddns = False
else:
updater_options['url'] = args.url
else:
process_message = "Must use --url if --provider is 'generic' " \
"(default)"
exec_result = EXECUTION_RESULT_NOK
update_ddns = False
response_code = None
response_text = None
if update_ddns:
ip_address = args.ip if args.ip else utils.get_ip()
if not ip_address:
process_message = "Unable to get IP address. Check connection."
exec_result = EXECUTION_RESULT_NOK
elif ip_address == utils.get_dns_ip(args.hostname):
process_message = "No update required."
else:
updater = provider_class(auth, args.hostname, updater_options)
print("Updating hostname '%s' with IP address %s "
"[provider: '%s']..."
% (args.hostname, ip_address, args.provider))
response_code, response_text = updater.update_dns(ip_address)
process_message = updater.status_message
proc_result = {
'exec_result': exec_result,
'response_code': response_code,
'response_text': response_text,
'process_message': process_message,
}
return proc_result | 0.000261 |
def copy(self):
"""
Safely get a copy of the current mesh.
Copied objects will have emptied caches to avoid memory
issues and so may be slow on initial operations until
caches are regenerated.
Current object will *not* have its cache cleared.
Returns
---------
copied : trimesh.Trimesh
Copy of current mesh
"""
copied = Trimesh()
# copy vertex and face data
copied._data.data = copy.deepcopy(self._data.data)
# copy visual information
copied.visual = self.visual.copy()
# get metadata
copied.metadata = copy.deepcopy(self.metadata)
# get center_mass and density
if self._center_mass is not None:
copied.center_mass = self.center_mass
copied._density = self._density
# make sure cache is set from here
copied._cache.clear()
return copied | 0.00211 |
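A minimal usage sketch of the copy semantics described above, assuming the `trimesh` package is installed; the unit-cube mesh is just a convenient stand-in.
```python
import trimesh  # assumes the trimesh package is available

mesh = trimesh.creation.box()               # unit cube centered at the origin
dup = mesh.copy()                           # deep copy; caches start out empty
dup.apply_translation([1.0, 0.0, 0.0])      # mutate the copy only
print(mesh.bounds[0][0], dup.bounds[0][0])  # -0.5 vs 0.5: original untouched
```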
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename,'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended-12)
import struct, StringIO, ConfigParser
tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended-(12+cfglen))
cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if sys.version_info >= (2,6):
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but on Python 3, it must be
# unicode for the RawConfigParser, so decode it. Is this the
# right encoding?
config = config.decode('ascii')
cfg.readfp(StringIO.StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close() | 0.00507 |
def forum_topic_get_by_tag_for_user(self, tag=None, author=None):
"""Get all forum topics with a specific tag"""
if not tag:
return None
if author:
r = self._request('ebuio/forum/search/bytag/' + tag + '?u=' + author)
else:
r = self._request('ebuio/forum/search/bytag/' + tag)
if not r:
return None
retour = []
for data in r.json().get('data', []):
retour.append(data)
return retour | 0.005871 |
def validate_metadata_sign(xml, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates a signature of an EntityDescriptor.
:param xml: The element we should validate
:type: string | Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem, ["ID"])
signature_nodes = OneLogin_Saml2_XML.query(elem, '/md:EntitiesDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/md:SPSSODescriptor/ds:Signature')
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/md:IDPSSODescriptor/ds:Signature')
if len(signature_nodes) > 0:
for signature_node in signature_nodes:
# Raises exception if invalid
OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, debug, raise_exceptions=True)
return True
else:
raise Exception('Could not validate metadata signature: No signature nodes found.') | 0.005462 |
def initialize(self):
""" A reimplemented initializer.
This method will add the include objects to the parent of the
include and ensure that they are initialized.
"""
super(Block, self).initialize()
block = self.block
if block: #: This block is setting the content of another block
#: Remove the existing blocks children
if self.mode == 'replace':
#: Clear the blocks children
for c in block.children:
c.destroy()
#: Add this blocks children to the other block
block.insert_children(None, self.children)
else: #: This block is inserting its children into its parent
self.parent.insert_children(self, self.children) | 0.005019 |
def _get_http(url, temp_file_name, initial_size, file_size, verbose_bool,
progressbar, ncols=80):
"""Safely (resume a) download to a file from http(s)."""
# Actually do the reading
req = urllib.request.Request(url)
if initial_size > 0:
req.headers['Range'] = 'bytes=%s-' % (initial_size,)
try:
response = urllib.request.urlopen(req)
except Exception:
# There is a problem that may be due to resuming, some
# servers may not support the "Range" header. Switch
# back to complete download method
tqdm.write('Resuming download failed (server '
'rejected the request). Attempting to '
'restart downloading the entire file.')
del req.headers['Range']
response = urllib.request.urlopen(req)
total_size = int(response.headers.get('Content-Length', '1').strip())
if initial_size > 0 and file_size == total_size:
tqdm.write('Resuming download failed (resume file size '
'mismatch). Attempting to restart downloading the '
'entire file.')
initial_size = 0
total_size += initial_size
if total_size != file_size:
raise RuntimeError('URL could not be parsed properly')
mode = 'ab' if initial_size > 0 else 'wb'
if progressbar is True:
progress = tqdm(total=total_size, initial=initial_size, desc='file_sizes',
ncols=ncols, unit='B', unit_scale=True)
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, mode) as local_file:
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.005:
chunk_size *= 2
elif dt > 0.1 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
break
local_file.write(chunk)
if progressbar is True:
progress.update(len(chunk)) | 0.000988 |
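The chunk-size adaptation in the read loop above (double on fast reads, halve on slow ones) is easy to isolate; here is a self-contained sketch of that heuristic against an in-memory stream, keeping the 0.005 s / 0.1 s thresholds as illustrative assumptions.
```python
import io
import time

def read_adaptive(stream, initial_chunk=8192, fast=0.005, slow=0.1):
    """Read a stream to the end, growing the chunk size when reads are
    fast and shrinking it (down to the initial size) when they are slow."""
    chunk_size = initial_chunk
    out = bytearray()
    while True:
        t0 = time.time()
        chunk = stream.read(chunk_size)
        dt = time.time() - t0
        if dt < fast:
            chunk_size *= 2
        elif dt > slow and chunk_size > initial_chunk:
            chunk_size //= 2
        if not chunk:
            break
        out.extend(chunk)
    return bytes(out)

print(len(read_adaptive(io.BytesIO(b"x" * 100000))))  # 100000
```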
def loader_cls(self):
"""Loader class used in `JsonRef.replace_refs`."""
cls = self.app.config['JSONSCHEMAS_LOADER_CLS']
if isinstance(cls, six.string_types):
return import_string(cls)
return cls | 0.008368 |
def f_get_explored_parameters(self, fast_access=False, copy=True):
""" Returns a dictionary containing the full parameter names as keys and the parameters
or the parameter data items as values.
IMPORTANT: This dictionary always contains all explored parameters as keys.
Even when they are not loaded, in this case the value is simply `None`.
`fast_access` only works if all explored parameters are loaded.
:param fast_access:
Determines whether the parameter objects or their values are returned
in the dictionary.
:param copy:
Whether the original dictionary or a shallow copy is returned.
If you request the original dictionary, please do not modify it at all!
No-copy and fast access do not work at the same time: a ValueError is raised
if `fast_access` is True and `copy` is False.
:return: Dictionary containing the parameters.
:raises: ValueError
"""
return self._return_item_dictionary(self._explored_parameters, fast_access, copy) | 0.007306 |
def end_parallel(self):
"""
Ends a parallel region by merging the channels into a single stream.
Returns:
Stream: Stream for which subsequent transformations are no longer parallelized.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel`
"""
outport = self.oport
if isinstance(self.oport.operator, streamsx.topology.graph.Marker):
if self.oport.operator.kind == "$Union$":
pto = self.topology.graph.addPassThruOperator()
pto.addInputPort(outputPort=self.oport)
outport = pto.addOutputPort(schema=self.oport.schema)
op = self.topology.graph.addOperator("$EndParallel$")
op.addInputPort(outputPort=outport)
oport = op.addOutputPort(schema=self.oport.schema)
endP = Stream(self.topology, oport)
return endP | 0.003413 |
def resize(self, dims):
"""Resize our drawing area to encompass a space defined by the
given dimensions.
"""
width, height = dims[:2]
self.gl_resize(width, height) | 0.009852 |
def open_any(cls, file):
"""Open an image file. If the image is not PNG format, it would convert
the image into PNG with Pillow module. If the module is not
installed, :class:`ImportError` would be raised.
:arg file: Input file.
:type file: path-like or file-like
:rtype: :class:`PNG`
"""
with open_file(file, "rb") as f:
header = f.read(8)
f.seek(0)
if header != PNG_SIGN:
b = file_to_png(f)
else:
b = f.read()
return cls.from_bytes(b) | 0.039832 |
def enqueue_jobs(self):
"""
Move scheduled jobs into queues.
"""
self.log.debug('Checking for scheduled jobs')
jobs = self.get_jobs_to_queue()
for job in jobs:
self.enqueue_job(job)
# Refresh scheduler key's expiry
self.connection.expire(self.scheduler_key, int(self._interval) + 10)
return jobs | 0.005249 |
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True) | 0.00489 |
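The try/except AttributeError pattern above keeps one code path for interpreters with and without `subprocess.run` (added in Python 3.5). A standalone sketch of the same fallback, with `run_command` as a hypothetical helper name:
```python
import subprocess

def run_command(commandline):
    """Return the exit status, preferring subprocess.run when available."""
    try:
        return subprocess.run(commandline, shell=True).returncode
    except AttributeError:  # very old interpreters: no subprocess.run
        return subprocess.call(commandline, shell=True)

print(run_command("echo hello"))  # 0 on success
```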
def simulation_manager(self, thing=None, **kwargs):
"""
Constructs a new simulation manager.
:param thing: Optional - What to put in the new SimulationManager's active stash (either a SimState or a list of SimStates).
:param kwargs: Any additional keyword arguments will be passed to the SimulationManager constructor
:returns: The new SimulationManager
:rtype: angr.sim_manager.SimulationManager
Many different types can be passed to this method:
* If nothing is passed in, the SimulationManager is seeded with a state initialized for the program
entry point, i.e. :meth:`entry_state()`.
* If a :class:`SimState` is passed in, the SimulationManager is seeded with that state.
* If a list is passed in, the list must contain only SimStates and the whole list will be used to seed the SimulationManager.
"""
if thing is None:
thing = [ self.entry_state() ]
elif isinstance(thing, (list, tuple)):
if any(not isinstance(val, SimState) for val in thing):
raise AngrError("Bad type to initialize SimulationManager")
elif isinstance(thing, SimState):
thing = [ thing ]
else:
raise AngrError("BadType to initialze SimulationManager: %s" % repr(thing))
return SimulationManager(self.project, active_states=thing, **kwargs) | 0.008169 |
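A hedged usage sketch of the seeding behaviour described in the docstring, assuming `angr` is installed and using `/bin/ls` purely as a placeholder binary:
```python
import angr  # assumes angr is installed

proj = angr.Project("/bin/ls", auto_load_libs=False)  # placeholder target

# No argument: seeded with the entry state.
simgr = proj.factory.simulation_manager()

# An explicit SimState (or a list of them) works too.
state = proj.factory.entry_state()
simgr = proj.factory.simulation_manager([state])
print(simgr.active)
```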
def _if_to_py_ast(ctx: GeneratorContext, node: If) -> GeneratedPyAST:
"""Generate an intermediate if statement which assigns to a temporary
variable, which is returned as the expression value at the end of
evaluation.
Every expression in Basilisp is true if it is not the literal values nil
or false. This function compiles direct checks for the test value against
the Python values None and False to accommodate this behavior.
Note that the if and else bodies are switched in compilation so that we
can perform a short-circuit or comparison, rather than exhaustively checking
for both false and nil each time."""
assert node.op == NodeOp.IF
test_ast = gen_py_ast(ctx, node.test)
result_name = genname(_IF_RESULT_PREFIX)
then_ast = __if_body_to_py_ast(ctx, node.then, result_name)
else_ast = __if_body_to_py_ast(ctx, node.else_, result_name)
test_name = genname(_IF_TEST_PREFIX)
test_assign = ast.Assign(
targets=[ast.Name(id=test_name, ctx=ast.Store())], value=test_ast.node
)
ifstmt = ast.If(
test=ast.BoolOp(
op=ast.Or(),
values=[
ast.Compare(
left=ast.NameConstant(None),
ops=[ast.Is()],
comparators=[ast.Name(id=test_name, ctx=ast.Load())],
),
ast.Compare(
left=ast.NameConstant(False),
ops=[ast.Is()],
comparators=[ast.Name(id=test_name, ctx=ast.Load())],
),
],
),
values=[],
body=list(map(statementize, chain(else_ast.dependencies, [else_ast.node]))),
orelse=list(map(statementize, chain(then_ast.dependencies, [then_ast.node]))),
)
return GeneratedPyAST(
node=ast.Name(id=result_name, ctx=ast.Load()),
dependencies=list(chain(test_ast.dependencies, [test_assign, ifstmt])),
) | 0.002043 |
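The generated AST above boils down to a simple Python shape: assign the test to a temporary, branch on `is None or is False`, and assign the chosen branch's value to a result temporary. A runnable illustration with hypothetical names `if_test_1`/`if_result_1`:
```python
def lisp_if(test_value, then_fn, else_fn):
    """Mimic the lowering: only None and False select the else branch."""
    if_test_1 = test_value
    if if_test_1 is None or if_test_1 is False:
        if_result_1 = else_fn()
    else:
        if_result_1 = then_fn()
    return if_result_1

print(lisp_if(0, lambda: "then", lambda: "else"))     # 'then' (0 is truthy here)
print(lisp_if(None, lambda: "then", lambda: "else"))  # 'else'
```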
def Glyph(actor, glyphObj, orientationArray="",
scaleByVectorSize=False, c=None, alpha=1):
"""
At each vertex of a mesh, another mesh - a `'glyph'` - is shown with
various orientation options and coloring.
Color can be specified as a colormap which maps the size of the orientation
vectors in `orientationArray`.
:param orientationArray: list of vectors, ``vtkAbstractArray``
or the name of an already existing points array.
:type orientationArray: list, str, vtkAbstractArray
:param bool scaleByVectorSize: glyph mesh is scaled by the size of
the vectors.
.. hint:: |glyphs| |glyphs.py|_
|glyphs_arrow| |glyphs_arrow.py|_
"""
cmap = None
# user passing a color map to map orientationArray sizes
if c in list(colors._mapscales.keys()):
cmap = c
c = None
# user is passing an array of point colors
if utils.isSequence(c) and len(c) > 3:
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(3)
ucols.SetName("glyphRGB")
for col in c:
cl = colors.getColor(col)
ucols.InsertNextTuple3(cl[0]*255, cl[1]*255, cl[2]*255)
actor.polydata().GetPointData().SetScalars(ucols)
c = None
if isinstance(glyphObj, Actor):
glyphObj = glyphObj.clean().polydata()
gly = vtk.vtkGlyph3D()
gly.SetInputData(actor.polydata())
gly.SetSourceData(glyphObj)
gly.SetColorModeToColorByScalar()
if orientationArray != "":
gly.OrientOn()
gly.SetScaleFactor(1)
if scaleByVectorSize:
gly.SetScaleModeToScaleByVector()
else:
gly.SetScaleModeToDataScalingOff()
if orientationArray == "normals" or orientationArray == "Normals":
gly.SetVectorModeToUseNormal()
elif isinstance(orientationArray, vtk.vtkAbstractArray):
actor.GetMapper().GetInput().GetPointData().AddArray(orientationArray)
actor.GetMapper().GetInput().GetPointData().SetActiveVectors("glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
gly.SetVectorModeToUseVector()
elif utils.isSequence(orientationArray): # passing a list
actor.addPointVectors(orientationArray, "glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
else: # passing a name
gly.SetInputArrayToProcess(0, 0, 0, 0, orientationArray)
gly.SetVectorModeToUseVector()
if cmap:
gly.SetColorModeToColorByVector ()
else:
gly.SetColorModeToColorByScalar ()
gly.Update()
pd = gly.GetOutput()
actor = Actor(pd, c, alpha)
if cmap:
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(512)
lut.Build()
for i in range(512):
r, g, b = colors.colorMap(i, cmap, 0, 512)
lut.SetTableValue(i, r, g, b, 1)
actor.mapper.SetLookupTable(lut)
actor.mapper.ScalarVisibilityOn()
actor.mapper.SetScalarModeToUsePointData()
rng = pd.GetPointData().GetScalars().GetRange()
actor.mapper.SetScalarRange(rng[0], rng[1])
actor.GetProperty().SetInterpolationToFlat()
settings.collectable_actors.append(actor)
return actor | 0.004177 |
def symbol_to_id(self, symbol):
"""Returns the list of Entrez IDs for a given Geneways symbol
(there may be more than one)"""
if symbol not in self.symbols_to_ids:
m = 'Could not look up Entrez ID for Geneways symbol ' + symbol
raise Exception(m)
return self.symbols_to_ids[symbol] | 0.005917 |
def element_at(index):
"""Create a transducer which obtains the item at the specified index."""
if index < 0:
raise IndexError("element_at used with illegal index {}".format(index))
def element_at_transducer(reducer):
return ElementAt(reducer, index)
return element_at_transducer | 0.003185 |
def eval_in_new(cls, expr, *args, **kwargs):
""":meth:`eval` an expression in a new, temporary :class:`Context`.
This should be safe to use directly on user input.
Args:
expr (LispVal): The expression to evaluate.
*args: Args for the :class:`Context` constructor.
**kwargs: Kwargs for the :class:`Context` constructor.
"""
ctx = cls(*args, **kwargs)
ctx.env.rec_new(expr)
return ctx.eval(expr) | 0.004107 |
def wantMethod(self, method):
"""Accept the method if its attributes match.
"""
try:
cls = method.im_class
except AttributeError:
return False
return self.validateAttrib(method, cls) | 0.00813 |
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for given dag id. The dag id surely exists in our dag bag
as only / refresh button or cli.sync_perm will call this function
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'}
:type access_control: dict
:return:
"""
for dag_perm in self.DAG_PERMS:
perm_on_dag = self.find_permission_view_menu(dag_perm, dag_id)
if perm_on_dag is None:
self.add_permission_view_menu(dag_perm, dag_id)
if access_control:
self._sync_dag_view_permissions(dag_id, access_control) | 0.003452 |
def _generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq=3,
numRepeats=10, hub=[0,1], hubOffset=1, resets=False):
""" Generate a temporal dataset containing sequences that overlap one or more
elements with other sequences.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
hub: sub-sequence to place within each other sequence
hubOffset: where, within each sequence, to place the hub
resets: if True, turn on reset at start of each sequence
"""
# Check for conflicts in arguments
assert (hubOffset + len(hub) <= elementsPerSeq)
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('reset', 'int', 'R'),
('field1', 'string', ''),
('field2', 'float', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences with the hub in the middle
sequences = []
nextElemIdx = max(hub)+1
for _ in range(numSequences):
seq = []
for j in range(hubOffset):
seq.append(nextElemIdx)
nextElemIdx += 1
for j in hub:
seq.append(j)
j = hubOffset + len(hub)
while j < elementsPerSeq:
seq.append(nextElemIdx)
nextElemIdx += 1
j += 1
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for _ in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
for seqIdx in seqIdxs:
reset = int(resets)
seq = sequences[seqIdx]
for (x) in seq:
outFile.appendRecord([reset, str(x), x])
reset = 0
outFile.close() | 0.023245 |
def decorate(text, style):
""" Console decoration style definitions
:param text: the text string to decorate
:type text: str
:param style: the style used to decorate the string
:type style: str
:return: a decorated string
:rtype: str
"""
return {
'step-maj': click.style("\n" + '> ' + text, fg='yellow', bold=True),
'step-min': click.style(' - ' + text + ' ', bold=True),
'item-maj': click.style(' - ' + text + ' '),
'item-min': click.style(' - ' + text + ' '),
'quote-head-fail': click.style("\n" + chr(9485) + (chr(9480)*2) + ' ' + text, fg='red'),
'quote-head-pass': click.style("\n" + chr(9485) + (chr(9480)*2) + ' ' + text, fg='green'),
'quote-head-skip': click.style("\n" + chr(9485) + (chr(9480)*2) + ' ' + text, fg='yellow'),
'quote-fail': re.sub('^', click.style(chr(9482) + ' ', fg='red'), text, flags=re.M),
'quote-pass': re.sub('^', click.style(chr(9482) + ' ', fg='green'), text, flags=re.M),
'quote-skip': re.sub('^', click.style(chr(9482) + ' ', fg='yellow'), text, flags=re.M),
'fail': click.style(text + ' ', fg='red'),
'pass': click.style(text + ' ', fg='green'),
'skip': click.style(text + ' ', fg='yellow')
}.get(style, '') | 0.005291 |
def index_data(self, data, index_name, doc_type):
"""Index data in Stub Indexer."""
print 'ELS Stub Indexer getting called...'
print '%s %s %s %s' % (self, data, index_name, doc_type) | 0.009615 |
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name) | 0.004635 |
def setTxPower(self, tx_power):
"""Set the transmission power for one or more antennas.
@param tx_power: index into self.tx_power_table
"""
tx_pow_validated = self.get_tx_power(tx_power)
logger.debug('tx_pow_validated: %s', tx_pow_validated)
needs_update = False
for ant, (tx_pow_idx, tx_pow_dbm) in tx_pow_validated.items():
if self.tx_power[ant] != tx_pow_idx:
self.tx_power[ant] = tx_pow_idx
needs_update = True
logger.debug('tx_power for antenna %s: %s (%s dBm)', ant,
tx_pow_idx, tx_pow_dbm)
if needs_update and self.state == LLRPClient.STATE_INVENTORYING:
logger.debug('changing tx power; will stop politely, then resume')
d = self.stopPolitely()
d.addCallback(self.startInventory, force_regen_rospec=True) | 0.002232 |
def _determine_supported_alleles(command, supported_allele_flag):
"""
Try asking the commandline predictor (e.g. netMHCpan)
which alleles it supports.
"""
try:
# convert to str since Python3 returns a `bytes` object
supported_alleles_output = check_output([
command, supported_allele_flag
])
supported_alleles_str = supported_alleles_output.decode("ascii", "ignore")
assert len(supported_alleles_str) > 0, \
'%s returned empty allele list' % command
supported_alleles = set([])
for line in supported_alleles_str.split("\n"):
line = line.strip()
if not line.startswith('#') and len(line) > 0:
try:
# We need to normalize these alleles (the output of the predictor
# when it lists its supported alleles) so that they are comparable with
# our own alleles.
supported_alleles.add(normalize_allele_name(line))
except AlleleParseError as error:
logger.info("Skipping allele %s: %s", line, error)
continue
if len(supported_alleles) == 0:
raise ValueError("Unable to determine supported alleles")
return supported_alleles
except Exception as e:
logger.exception(e)
raise SystemError("Failed to run %s %s. Possibly an incorrect executable version?" % (
command,
supported_allele_flag)) | 0.003632 |
def add_element(self, element):
"""
Element can be href or type :py:class:`smc.base.model.Element`
::
>>> from smc.elements.other import Category
>>> category = Category('foo')
>>> category.add_element(Host('kali'))
:param str,Element element: element to add to tag
:raises: ModificationFailed: failed adding element
:return: None
"""
element = element_resolver(element)
self.make_request(
ModificationFailed,
method='create',
resource='category_add_element',
json={'value': element}) | 0.00311 |
def freeze_of_gait(self, x):
"""
This method assess freeze of gait following :cite:`g-BachlinPRMHGT10`.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:return freeze_time: What times do freeze of gait events occur. [measured in time (h:m:s)]
:rtype freeze_time: numpy.ndarray
:return freeze_index: Freeze Index is defined as the power in the “freeze” band [3–8 Hz] divided by the power in the “locomotor” band [0.5–3 Hz] [3]. [measured in Hz]
:rtype freeze_index: numpy.ndarray
:return list locomotor_freeze_index: Locomotor freeze index is the power in the “freeze” band [3–8 Hz] added to power in the “locomotor” band [0.5–3 Hz]. [measured in Hz]
:rtype locomotor_freeze_index: numpy.ndarray
"""
data = self.resample_signal(x).values
f_res = self.sampling_frequency / self.window
f_nr_LBs = int(self.loco_band[0] / f_res)
f_nr_LBe = int(self.loco_band[1] / f_res)
f_nr_FBs = int(self.freeze_band[0] / f_res)
f_nr_FBe = int(self.freeze_band[1] / f_res)
jPos = self.window + 1
i = 0
time = []
sumLocoFreeze = []
freezeIndex = []
while jPos < len(data):
jStart = jPos - self.window
time.append(jPos)
y = data[int(jStart):int(jPos)]
y = y - np.mean(y)
Y = np.fft.fft(y, int(self.window))
Pyy = abs(Y*Y) / self.window
areaLocoBand = numerical_integration( Pyy[f_nr_LBs-1 : f_nr_LBe], self.sampling_frequency )
areaFreezeBand = numerical_integration( Pyy[f_nr_FBs-1 : f_nr_FBe], self.sampling_frequency )
sumLocoFreeze.append(areaFreezeBand + areaLocoBand)
freezeIndex.append(areaFreezeBand / areaLocoBand)
jPos = jPos + self.step_size
i = i + 1
freeze_time = np.asarray(time, dtype=np.int32)
freeze_index = np.asarray(freezeIndex, dtype=np.float32)
locomotor_freeze_index = np.asarray(sumLocoFreeze, dtype=np.float32)
return freeze_time, freeze_index, locomotor_freeze_index | 0.008322 |
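A self-contained numpy sketch of the freeze-index ratio computed in the loop above (freeze-band power over locomotor-band power of a de-meaned, FFT'd window); the band edges and the plain power summation are simplifying assumptions in place of the class's helpers.
```python
import numpy as np

def freeze_index(window, fs, loco_band=(0.5, 3.0), freeze_band=(3.0, 8.0)):
    y = np.asarray(window, dtype=float)
    y = y - y.mean()                      # de-mean, as in the loop above
    Pyy = np.abs(np.fft.fft(y)) ** 2 / len(y)
    freqs = np.fft.fftfreq(len(y), d=1.0 / fs)
    loco = Pyy[(freqs >= loco_band[0]) & (freqs < loco_band[1])].sum()
    freeze = Pyy[(freqs >= freeze_band[0]) & (freqs < freeze_band[1])].sum()
    return freeze / loco

fs = 100.0
t = np.arange(0, 10, 1.0 / fs)
walking = np.sin(2 * np.pi * 1.5 * t)    # energy in the locomotor band
trembling = np.sin(2 * np.pi * 6.0 * t)  # energy in the freeze band
print(freeze_index(walking, fs) < 1.0 < freeze_index(trembling, fs))  # True
```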
def as_download(self, data, block_num=-1):
"""
Downloads DB data into the AG asynchronously.
A whole block (including header and footer) must be available into the
user buffer.
:param block_num: New Block number (or -1)
:param data: the user buffer
"""
size = len(data)
type_ = c_byte * len(data)
cdata = type_.from_buffer_copy(data)
return self.library.Cli_AsDownload(self.pointer, block_num,
byref(cdata), size) | 0.003656 |
def flatten(iterable, check=is_iterable):
"""Produces a recursively flattened version of ``iterable``
``check``
Recurses only if check(value) is true.
"""
for value in iterable:
if check(value):
for flat in flatten(value, check):
yield flat
else:
yield value | 0.00295 |
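Usage sketch for `flatten`, passing an explicit `check` so strings stay atomic (the default `is_iterable` helper is assumed to behave similarly):
```python
def is_listlike(value):
    return isinstance(value, (list, tuple))

nested = [1, [2, [3, 4]], "ab", (5, 6)]
print(list(flatten(nested, check=is_listlike)))  # [1, 2, 3, 4, 'ab', 5, 6]
```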
def update_resource_value(self, device_id, _resource_path, resource_value, **kwargs): # noqa: E501
"""Write to a resource or use write-attributes for a resource # noqa: E501
With this API, you can [write a new value to existing resources](/docs/current/connecting/handle-resource-webapp.html) or [use the write-attributes](/docs/current/connecting/resource-change-webapp.html) for a resource. This API can also be used to transfer files to the device. Device Management Connect LwM2M server implements the Option 1 from RFC7959. The maximum block size is 1024 bytes. The block size versus transferred file size is something to note in low quality networks. The customer application needs to know what type of file is transferred (for example txt) and the payload can be encrypted by the customer. The maximum size of payload is 1048576 bytes. All resource APIs are asynchronous. These APIs respond only if the device is turned on and connected to Device Management Connect and there is an active notification channel. Supported content types depend on the device and its resource. Device Management translates HTTP to equivalent CoAP content type. **Example usage:** This example sets the alarm on a buzzer. The command writes the [Buzzer](http://www.openmobilealliance.org/tech/profiles/lwm2m/3338.xml) instance 0, \"On/Off\" boolean resource to '1'. curl -X PUT \\ https://api.us-east-1.mbedcloud.com/v2/endpoints/{device-id}/3338/0/5850 -H \"content-type: text/plain\" \\ -H 'authorization: Bearer {api-key}' -d '1' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_resource_value(device_id, _resource_path, resource_value, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str device_id: A unique Device Management device ID for the endpoint. Note that the ID must be an exact match. You cannot use wildcards here. (required)
:param str _resource_path: Resource URL. (required)
:param str resource_value: The value to be set to the resource. (required)
:param bool no_resp: <br/><br/><b>Non-confirmable requests</b><br/> All resource APIs have the parameter noResp. If you make a request with `noResp=true`, Device Management Connect makes a CoAP non-confirmable request to the device. Such requests are not guaranteed to arrive in the device, and you do not get back an async-response-id. If calls with this parameter enabled succeed, they return with the status code `204 No Content`. If the underlying protocol does not support non-confirmable requests, or if the endpoint is registered in queue mode, the response is status code `409 Conflict`.
:return: AsyncID
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_resource_value_with_http_info(device_id, _resource_path, resource_value, **kwargs) # noqa: E501
else:
(data) = self.update_resource_value_with_http_info(device_id, _resource_path, resource_value, **kwargs) # noqa: E501
return data | 0.0006 |
def dict2kvlist(o):
'''
Serializes a dict-like object into a generator of the flatten list of
repeating key-value pairs. It is useful when using HMSET method in Redis.
Example:
>>> list(dict2kvlist({'a': 1, 'b': 2}))
['a', 1, 'b', 2]
'''
return chain.from_iterable((k, v) for k, v in o.items()) | 0.003049 |
def structure_to_abivars(structure, **kwargs):
"""
Receives a structure and returns a dictionary with the ABINIT variables.
"""
if not structure.is_ordered:
raise ValueError("""\
Received disordered structure with partial occupancies that cannot be converted into an Abinit input
Please use OrderDisorderedStructureTransformation or EnumerateStructureTransformation
to build an appropriate supercell from partial occupancies or alternatively use the Virtual Crystal Approximation.""")
types_of_specie = structure.types_of_specie
natom = structure.num_sites
znucl_type = [specie.number for specie in types_of_specie]
znucl_atoms = structure.atomic_numbers
typat = np.zeros(natom, dtype=int)
for atm_idx, site in enumerate(structure):
typat[atm_idx] = types_of_specie.index(site.specie) + 1
rprim = ArrayWithUnit(structure.lattice.matrix, "ang").to("bohr")
angdeg = structure.lattice.angles
xred = np.reshape([site.frac_coords for site in structure], (-1, 3))
# Set small values to zero. This usually happens when the CIF file
# does not give structure parameters with enough digits.
rprim = np.where(np.abs(rprim) > 1e-8, rprim, 0.0)
xred = np.where(np.abs(xred) > 1e-8, xred, 0.0)
# Info on atoms.
d = dict(
natom=natom,
ntypat=len(types_of_specie),
typat=typat,
znucl=znucl_type,
xred=xred,
)
# Add info on the lattice.
# Should we use (rprim, acell) or (angdeg, acell) to specify the lattice?
geomode = kwargs.pop("geomode", "rprim")
if geomode == "automatic":
geomode = "rprim"
if structure.lattice.is_hexagonal: # or structure.lattice.is_rhombohedral
geomode = "angdeg"
angdeg = structure.lattice.angles
# Here one could polish a bit the numerical values if they are not exact.
# Note that in pmg the angles are 12, 20, 01 while in Abinit 12, 02, 01
# One should make sure that the orientation is preserved (see Curtarolo's settings)
if geomode == "rprim":
d.update(
acell=3 * [1.0],
rprim=rprim,
)
elif geomode == "angdeg":
d.update(
acell=ArrayWithUnit(structure.lattice.abc, "ang").to("bohr"),
angdeg=angdeg,
)
else:
raise ValueError("Wrong value for geomode: %s" % geomode)
return d | 0.003705 |
def nlmsg_seq(self, value):
"""Sequence setter."""
self.bytearray[self._get_slicers(3)] = bytearray(c_uint32(value or 0)) | 0.014599 |
def make_path(base_uri, path, filename, path_dimensions, split_length):
"""Generate a path as base location for file instance.
:param base_uri: The base URI.
:param path: The relative path.
:param filename: The file name appended after the path chunks.
:param path_dimensions: Number of chunks the path should be split into.
:param split_length: The length of any chunk.
:returns: A string representing the full path.
"""
assert len(path) > path_dimensions * split_length
uri_parts = []
for i in range(path_dimensions):
uri_parts.append(path[0:split_length])
path = path[split_length:]
uri_parts.append(path)
uri_parts.append(filename)
return os.path.join(base_uri, *uri_parts) | 0.00146 |
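Usage sketch: splitting the first chunks of a hex digest into nested directories (POSIX separators assumed):
```python
print(make_path("/data", "deadbeef1234", "file.pdf",
                path_dimensions=2, split_length=2))
# /data/de/ad/beef1234/file.pdf
```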
def actual_query_range(self):
"""This is the actual query range for the positive strand
:returns: Range of query positive strand covered
:rtype: GenomicRange
"""
a = self.alignment_ranges
#return GenomicRange(a[0][1].chr,a[0][1].start,a[-1][1].end,self.get_strand())
if self.get_strand() == '+':
return GenomicRange(a[0][1].chr,a[0][1].start,a[-1][1].end,self.get_strand())
#must be - strand
return GenomicRange(a[0][1].chr,self.query_sequence_length-a[-1][1].end+1,self.query_sequence_length-a[0][1].start+1,dir=self.strand) | 0.022847 |
def rewrite_record(bdist_dir):
""" Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file
"""
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist-info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | 0.000609 |
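The per-file entry written above is just a path plus an urlsafe-base64 SHA-256 and a byte count; a minimal sketch of that hash/size pair for one in-memory file (note the snippet keeps whatever padding `urlsafe_b64encode` returns):
```python
import hashlib
from base64 import urlsafe_b64encode

data = b"print('hello')\n"
digest = hashlib.sha256(data).digest()
print("sha256=" + urlsafe_b64encode(digest).decode("ascii"), len(data))
```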
def derive_key(self,
derivation_method,
derivation_length,
derivation_data=None,
key_material=None,
hash_algorithm=None,
salt=None,
iteration_count=None,
encryption_algorithm=None,
cipher_mode=None,
padding_method=None,
iv_nonce=None):
"""
Derive key data using a variety of key derivation functions.
Args:
derivation_method (DerivationMethod): An enumeration specifying
the key derivation method to use. Required.
derivation_length (int): An integer specifying the size of the
derived key data in bytes. Required.
derivation_data (bytes): The non-cryptographic bytes to be used
in the key derivation process (e.g., the data to be encrypted,
hashed, HMACed). Required in the general case. Optional if the
derivation method is Hash and the key material is provided.
Optional, defaults to None.
key_material (bytes): The bytes of the key material to use for
key derivation. Required in the general case. Optional if
the derivation_method is HASH and derivation_data is provided.
Optional, defaults to None.
hash_algorithm (HashingAlgorithm): An enumeration specifying the
hashing algorithm to use with the key derivation method.
Required in the general case, optional if the derivation
method specifies encryption. Optional, defaults to None.
salt (bytes): Bytes representing a randomly generated salt.
Required if the derivation method is PBKDF2. Optional,
defaults to None.
iteration_count (int): An integer representing the number of
iterations to use when deriving key material. Required if
the derivation method is PBKDF2. Optional, defaults to None.
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the symmetric encryption algorithm to use for
encryption-based key derivation. Required if the derivation
method specifies encryption. Optional, defaults to None.
cipher_mode (BlockCipherMode): An enumeration specifying the
block cipher mode to use with the encryption algorithm.
Required in the general case if the derivation method
specifies encryption and the encryption algorithm is
specified. Optional if the encryption algorithm is RC4 (aka
ARC4). Optional, defaults to None.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use on the data before encryption. Required
in the general case if the derivation method specifies
encryption and the encryption algorithm is specified. Required
if the cipher mode is for block ciphers (e.g., CBC, ECB).
Optional otherwise, defaults to None.
iv_nonce (bytes): The IV/nonce value to use to initialize the mode
of the encryption algorithm. Required in the general case if
the derivation method specifies encryption and the encryption
algorithm is specified. Optional, defaults to None. If
required and not provided, it will be autogenerated.
Returns:
bytes: the bytes of the derived data
Raises:
InvalidField: Raised when cryptographic data and/or settings are
unsupported or incompatible with the derivation method.
Example:
>>> engine = CryptographyEngine()
>>> result = engine.derive_key(
... derivation_method=enums.DerivationMethod.HASH,
... derivation_length=16,
... derivation_data=b'abc',
... hash_algorithm=enums.HashingAlgorithm.MD5
... )
>>> result
b'\x90\x01P\x98<\xd2O\xb0\xd6\x96?}(\xe1\x7fr'
"""
if derivation_method == enums.DerivationMethod.ENCRYPT:
result = self.encrypt(
encryption_algorithm=encryption_algorithm,
encryption_key=key_material,
plain_text=derivation_data,
cipher_mode=cipher_mode,
padding_method=padding_method,
iv_nonce=iv_nonce
)
return result.get('cipher_text')
else:
# Handle key derivation functions that use hash algorithms
# Set up the hashing algorithm
if hash_algorithm is None:
raise exceptions.InvalidField("Hash algorithm is required.")
hashing_algorithm = self._encryption_hash_algorithms.get(
hash_algorithm,
None
)
if hashing_algorithm is None:
raise exceptions.InvalidField(
"Hash algorithm '{0}' is not a supported hashing "
"algorithm.".format(hash_algorithm)
)
if derivation_method == enums.DerivationMethod.HMAC:
df = hkdf.HKDF(
algorithm=hashing_algorithm(),
length=derivation_length,
salt=salt,
info=derivation_data,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
elif derivation_method == enums.DerivationMethod.HASH:
if None not in [derivation_data, key_material]:
raise exceptions.InvalidField(
"For hash-based key derivation, specify only "
"derivation data or key material, not both."
)
elif derivation_data is not None:
hashing_data = derivation_data
elif key_material is not None:
hashing_data = key_material
else:
raise exceptions.InvalidField(
"For hash-based key derivation, derivation data or "
"key material must be specified."
)
df = hashes.Hash(
algorithm=hashing_algorithm(),
backend=default_backend()
)
df.update(hashing_data)
derived_data = df.finalize()
return derived_data
elif derivation_method == enums.DerivationMethod.PBKDF2:
if salt is None:
raise exceptions.InvalidField(
"For PBKDF2 key derivation, salt must be specified."
)
if iteration_count is None:
raise exceptions.InvalidField(
"For PBKDF2 key derivation, iteration count must be "
"specified."
)
df = pbkdf2.PBKDF2HMAC(
algorithm=hashing_algorithm(),
length=derivation_length,
salt=salt,
iterations=iteration_count,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
elif derivation_method == enums.DerivationMethod.NIST800_108_C:
df = kbkdf.KBKDFHMAC(
algorithm=hashing_algorithm(),
mode=kbkdf.Mode.CounterMode,
length=derivation_length,
rlen=4,
llen=None,
location=kbkdf.CounterLocation.BeforeFixed,
label=None,
context=None,
fixed=derivation_data,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
else:
raise exceptions.InvalidField(
"Derivation method '{0}' is not a supported key "
"derivation method.".format(derivation_method)
) | 0.001522 |
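For comparison, a minimal sketch of what the PBKDF2 branch above delegates to, calling the `cryptography` package directly (salt, length, and iteration count are arbitrary example values):
```python
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

kdf = PBKDF2HMAC(
    algorithm=hashes.SHA256(),
    length=32,
    salt=os.urandom(16),
    iterations=100000,
    backend=default_backend(),
)
key = kdf.derive(b"example key material")
print(len(key))  # 32
```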
def _vminFindStart(v,E,Lz,I3V,delta,u0,cosh2u0,sinh2u0,
potu0pi2,pot):
"""
NAME:
_vminFindStart
PURPOSE:
Find adequate start point to solve for vmin
INPUT:
same as JzStaeckelIntegrandSquared
OUTPUT:
rstart
HISTORY:
2012-11-28 - Written - Bovy (IAS)
"""
vtry= 0.9*v
while _JzStaeckelIntegrandSquared(vtry,
E,Lz,I3V,delta,u0,cosh2u0,sinh2u0,
potu0pi2,pot) >= 0. \
and vtry > 0.000000001:
vtry*= 0.9
if vtry < 0.000000001: return 0.
return vtry | 0.032496 |
def natural_name(self) -> str:
"""Valid python identifier representation of the expession."""
name = self.expression.strip()
for op in operators:
name = name.replace(op, operator_to_identifier[op])
return wt_kit.string2identifier(name) | 0.007168 |
def find_nonzero_constrained_reactions(model):
"""Return list of reactions with non-zero, non-maximal bounds."""
lower_bound, upper_bound = helpers.find_bounds(model)
return [rxn for rxn in model.reactions if
0 > rxn.lower_bound > lower_bound or
0 < rxn.upper_bound < upper_bound] | 0.003165 |
def is_playing_shared_game(self, steamID, appid_playing, format=None):
"""Returns valid lender SteamID if game currently played is borrowed.
steamID: The users ID
appid_playing: The game player is currently playing
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | 0.007452 |
def dump(exif_dict_original):
"""
py:function:: piexif.dump(exif_dict)
Return exif as bytes.
:param dict exif_dict_original: Exif data({"0th":dict, "Exif":dict, "GPS":dict, "Interop":dict, "1st":dict, "thumbnail":bytes})
:return: Exif
:rtype: bytes
"""
exif_dict = copy.deepcopy(exif_dict_original)
header = b"Exif\x00\x00\x4d\x4d\x00\x2a\x00\x00\x00\x08"
exif_is = False
gps_is = False
interop_is = False
first_is = False
if "0th" in exif_dict:
zeroth_ifd = exif_dict["0th"]
else:
zeroth_ifd = {}
if (("Exif" in exif_dict) and len(exif_dict["Exif"]) or
("Interop" in exif_dict) and len(exif_dict["Interop"]) ):
zeroth_ifd[ImageIFD.ExifTag] = 1
exif_is = True
exif_ifd = exif_dict["Exif"]
if ("Interop" in exif_dict) and len(exif_dict["Interop"]):
exif_ifd[ExifIFD.InteroperabilityTag] = 1
interop_is = True
interop_ifd = exif_dict["Interop"]
elif ExifIFD.InteroperabilityTag in exif_ifd:
exif_ifd.pop(ExifIFD.InteroperabilityTag)
elif ImageIFD.ExifTag in zeroth_ifd:
zeroth_ifd.pop(ImageIFD.ExifTag)
if ("GPS" in exif_dict) and len(exif_dict["GPS"]):
zeroth_ifd[ImageIFD.GPSTag] = 1
gps_is = True
gps_ifd = exif_dict["GPS"]
elif ImageIFD.GPSTag in zeroth_ifd:
zeroth_ifd.pop(ImageIFD.GPSTag)
if (("1st" in exif_dict) and
("thumbnail" in exif_dict) and
(exif_dict["thumbnail"] is not None)):
first_is = True
exif_dict["1st"][ImageIFD.JPEGInterchangeFormat] = 1
exif_dict["1st"][ImageIFD.JPEGInterchangeFormatLength] = 1
first_ifd = exif_dict["1st"]
zeroth_set = _dict_to_bytes(zeroth_ifd, "0th", 0)
zeroth_length = (len(zeroth_set[0]) + exif_is * 12 + gps_is * 12 + 4 +
len(zeroth_set[1]))
if exif_is:
exif_set = _dict_to_bytes(exif_ifd, "Exif", zeroth_length)
exif_length = len(exif_set[0]) + interop_is * 12 + len(exif_set[1])
else:
exif_bytes = b""
exif_length = 0
if gps_is:
gps_set = _dict_to_bytes(gps_ifd, "GPS", zeroth_length + exif_length)
gps_bytes = b"".join(gps_set)
gps_length = len(gps_bytes)
else:
gps_bytes = b""
gps_length = 0
if interop_is:
offset = zeroth_length + exif_length + gps_length
interop_set = _dict_to_bytes(interop_ifd, "Interop", offset)
interop_bytes = b"".join(interop_set)
interop_length = len(interop_bytes)
else:
interop_bytes = b""
interop_length = 0
if first_is:
offset = zeroth_length + exif_length + gps_length + interop_length
first_set = _dict_to_bytes(first_ifd, "1st", offset)
thumbnail = _get_thumbnail(exif_dict["thumbnail"])
thumbnail_max_size = 64000
if len(thumbnail) > thumbnail_max_size:
raise ValueError("Given thumbnail is too large. max 64kB")
else:
first_bytes = b""
if exif_is:
pointer_value = TIFF_HEADER_LENGTH + zeroth_length
pointer_str = struct.pack(">I", pointer_value)
key = ImageIFD.ExifTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
exif_pointer = key_str + type_str + length_str + pointer_str
else:
exif_pointer = b""
if gps_is:
pointer_value = TIFF_HEADER_LENGTH + zeroth_length + exif_length
pointer_str = struct.pack(">I", pointer_value)
key = ImageIFD.GPSTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
gps_pointer = key_str + type_str + length_str + pointer_str
else:
gps_pointer = b""
if interop_is:
pointer_value = (TIFF_HEADER_LENGTH +
zeroth_length + exif_length + gps_length)
pointer_str = struct.pack(">I", pointer_value)
key = ExifIFD.InteroperabilityTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
interop_pointer = key_str + type_str + length_str + pointer_str
else:
interop_pointer = b""
if first_is:
pointer_value = (TIFF_HEADER_LENGTH + zeroth_length +
exif_length + gps_length + interop_length)
first_ifd_pointer = struct.pack(">L", pointer_value)
thumbnail_pointer = (pointer_value + len(first_set[0]) + 24 +
4 + len(first_set[1]))
thumbnail_p_bytes = (b"\x02\x01\x00\x04\x00\x00\x00\x01" +
struct.pack(">L", thumbnail_pointer))
thumbnail_length_bytes = (b"\x02\x02\x00\x04\x00\x00\x00\x01" +
struct.pack(">L", len(thumbnail)))
first_bytes = (first_set[0] + thumbnail_p_bytes +
thumbnail_length_bytes + b"\x00\x00\x00\x00" +
first_set[1] + thumbnail)
else:
first_ifd_pointer = b"\x00\x00\x00\x00"
zeroth_bytes = (zeroth_set[0] + exif_pointer + gps_pointer +
first_ifd_pointer + zeroth_set[1])
if exif_is:
exif_bytes = exif_set[0] + interop_pointer + exif_set[1]
return (header + zeroth_bytes + exif_bytes + gps_bytes +
interop_bytes + first_bytes) | 0.000731 |
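A hedged usage sketch with the `piexif` package: build a tiny EXIF dictionary, dump it to bytes, and (optionally) embed it in a JPEG; `photo.jpg` is a hypothetical file name.
```python
import piexif  # assumes the piexif package is installed

exif_dict = {"0th": {piexif.ImageIFD.Make: b"ExampleCam",
                     piexif.ImageIFD.XResolution: (72, 1)},
             "Exif": {}, "GPS": {}, "1st": {}, "thumbnail": None}
exif_bytes = piexif.dump(exif_dict)
print(exif_bytes[:6])  # b'Exif\x00\x00'
# piexif.insert(exif_bytes, "photo.jpg")  # hypothetical output file
```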
def lower_backtrack_blocks(match_query, location_types):
"""Lower Backtrack blocks into (QueryRoot, MarkLocation) pairs of blocks."""
# The lowering works as follows:
# 1. Upon seeing a Backtrack block, end the current traversal (if non-empty).
# 2. Start new traversal from the type and location to which the Backtrack pointed.
# 3. If the Backtrack block had an associated MarkLocation, mark that location
# as equivalent to the location where the Backtrack pointed.
new_match_traversals = []
location_translations = dict()
for current_match_traversal in match_query.match_traversals:
new_traversal = []
for step in current_match_traversal:
if not isinstance(step.root_block, Backtrack):
new_traversal.append(step)
else:
# 1. Upon seeing a Backtrack block, end the current traversal (if non-empty).
if new_traversal:
new_match_traversals.append(new_traversal)
new_traversal = []
backtrack_location = step.root_block.location
backtrack_location_type = location_types[backtrack_location]
# 2. Start new traversal from the type and location to which the Backtrack pointed.
new_root_block = QueryRoot({backtrack_location_type.name})
new_as_block = MarkLocation(backtrack_location)
# 3. If the Backtrack block had an associated MarkLocation, mark that location
# as equivalent to the location where the Backtrack pointed.
if step.as_block is not None:
location_translations[step.as_block.location] = backtrack_location
if step.coerce_type_block is not None:
raise AssertionError(u'Encountered type coercion in a MatchStep with '
u'a Backtrack root block, this is unexpected: {} {}'
.format(step, match_query))
new_step = step._replace(root_block=new_root_block, as_block=new_as_block)
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
_flatten_location_translations(location_translations)
new_match_query = match_query._replace(match_traversals=new_match_traversals)
return _translate_equivalent_locations(new_match_query, location_translations) | 0.005663 |
def file_exists(db, user_id, path):
"""
Check if a file exists.
"""
try:
get_file(
db,
user_id,
path,
include_content=False,
decrypt_func=unused_decrypt_func,
)
return True
except NoSuchFile:
return False | 0.003165 |
def _fetch_type_main(cls, cls_in, match):
"""
:type cls_in: type
:type match: _sre.SRE_Match
:rtype: type
"""
return cls._str_to_type(
cls_in,
match.group(cls._SUBMATCH_INDEX_TYPE_MAIN)
) | 0.007435 |
def Lr(self,value):
""" set row rotation """
assert value.shape==(self.N, self.N), 'dimension mismatch'
self._Lr = value
self.clear_cache() | 0.023392 |
def get_img_name(self, band, resolution=None):
"""
:param band: band name
:type band: str
:param resolution: Specifies the resolution in case of Sentinel-2 L2A products
:type resolution: str or None
:return: name of band image file
:rtype: str
"""
band = band.split('/')[-1]
if self.safe_type is EsaSafeType.OLD_TYPE:
name = self.tile_id.rsplit('_', 1)[0] + '_' + band
else:
name = '_'.join([self.tile_id.split('_')[1], self.get_datatake_time(), band])
if self.data_source is DataSource.SENTINEL2_L2A and resolution is not None:
name = '{}_{}'.format(name, resolution.lstrip('R'))
if self.data_source is DataSource.SENTINEL2_L2A and self.baseline <= '02.06':
name = 'L2A_{}'.format(name)
return '{}.jp2'.format(name) | 0.006849 |
def create_blueprint(self, appbuilder, endpoint=None, static_folder=None):
"""
Create Flask blueprint. You will generally not use it
:param appbuilder:
the AppBuilder object
:param endpoint:
endpoint override for this blueprint,
will assume class name if not provided
:param static_folder:
the relative override for static folder,
if omitted application will use the appbuilder static
"""
# Store appbuilder instance
self.appbuilder = appbuilder
# If endpoint name is not provided, get it from the class name
self.endpoint = endpoint or self.__class__.__name__
if self.route_base is None:
self.route_base = "/" + self.__class__.__name__.lower()
self.static_folder = static_folder
if not static_folder:
# Create blueprint and register rules
self.blueprint = Blueprint(
self.endpoint,
__name__,
url_prefix=self.route_base,
template_folder=self.template_folder,
)
else:
self.blueprint = Blueprint(
self.endpoint,
__name__,
url_prefix=self.route_base,
template_folder=self.template_folder,
static_folder=static_folder,
)
self._register_urls()
return self.blueprint | 0.001334 |
def get_mfa(self):
"""Return the currently-valid MFA token for this application."""
token = str(self.totp.now())
# PyOTP doesn't pre-pad tokens shorter than 6 characters
# ROTP does, so we have to.
while len(token) < 6:
token = '0{}'.format(token)
return token | 0.00625 |
def moment_magnitude_scalar(moment):
'''
Uses Hanks & Kanamori formula for calculating moment magnitude from
a scalar moment (Nm)
'''
if isinstance(moment, np.ndarray):
return (2. / 3.) * (np.log10(moment) - 9.05)
else:
return (2. / 3.) * (log10(moment) - 9.05) | 0.003322 |
def generate_base(path: str) -> str:
""" Convert path, which can be a URL or a file path into a base URI
:param path: file location or url
:return: file location or url sans actual name
"""
if ':' in path:
parts = urlparse(path)
parts_dict = parts._asdict()
parts_dict['path'] = os.path.split(parts.path)[0] if '/' in parts.path else ''
return urlunparse(ParseResult(**parts_dict)) + '/'
else:
return (os.path.split(path)[0] if '/' in path else '') + '/' | 0.003854 |
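Usage sketch covering both branches (URL and plain file path):
```python
print(generate_base("http://example.org/schemas/item.json"))
# http://example.org/schemas/
print(generate_base("/srv/data/item.json"))
# /srv/data/
```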
def H11(self):
"Difference entropy."
return -(self.p_xminusy * np.log(self.p_xminusy + self.eps)).sum(1) | 0.016667 |
def update_record_ip(self, ip, domain, name, record_type):
"""Update the IP address(es) for (a) domain(s) specified by type and name.
:param ip: the new IP for the DNS record (ex. '123.1.2.255')
:param domain: the domain where the DNS belongs to (ex. 'example.com')
:param name: the DNS record name to be updated (ex. 'dynamic')
:param record_type: Record type (ex. 'CNAME', 'A'...)
:return: True if no exceptions occurred
"""
records = self.get_records(domain, name=name, record_type=record_type)
data = {'data': str(ip)}
for rec in records:
rec.update(data)
self.update_record(domain, rec)
# If we didn't get any exceptions, return True to let the user know
return True | 0.003774 |
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred."""
# Override to avoid using sys.excepthook PY-12600
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
sys.stderr.write(''.join(list)) | 0.002255 |
def _fake_openassociatorinstancepaths(self, namespace, **params):
# pylint: disable=invalid-name
"""
Implements WBEM server responder for
:meth:`~pywbem.WBEMConnection.OpenAssociatorInstancePaths`
with data from the instance repository.
"""
self._validate_namespace(namespace)
self._validate_open_params(**params)
params['ObjectName'] = params['InstanceName']
del params['InstanceName']
result = self._fake_associatornames(namespace, **params)
objects = [] if result is None else [x[2] for x in result[0][2]]
return self._open_response(objects, namespace,
'PullInstancePaths', **params) | 0.004121 |
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,)) | 0.000702 |
def from_blob(cls, blob, stage=0):
""":return: Fully equipped BaseIndexEntry at the given stage"""
return cls((blob.mode, blob.binsha, stage << CE_STAGESHIFT, blob.path)) | 0.010753 |
def convert(self, obj):
"""Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.0.* type and
converts it to BY_ID_HONEY_BADGERFISH version. The object is modified in place
and returned.
"""
if self.pristine_if_invalid:
raise NotImplementedError('pristine_if_invalid option is not supported yet')
nex = get_nexml_el(obj)
assert nex
# Create the new objects as locals. This section should not
# mutate obj, so that if there is an exception the object
# is unchanged on the error exit
otus = _index_list_of_values(nex, 'otus')
o_t = self.convert_otus(otus)
otusById, otusElementOrder = o_t
trees = _get_index_list_of_values(nex, 'trees')
treesById = dict((i['@id'], i) for i in trees)
treesElementOrder = [i['@id'] for i in trees]
if len(treesById) != len(treesElementOrder):
trees_id_set = set()
for tgid in treesElementOrder:
if tgid in trees_id_set:
raise NexsonError('Repeated trees element id "{}"'.format(tgid))
trees_id_set.add(tgid)
tree_id_set = set()
treeContainingObjByTreesId = {}
for tree_group in trees:
# _LOG.debug('converting tree group {} to by_id'.format(tree_group['@id']))
treeById = {}
treeElementOrder = []
tree_array = _get_index_list_of_values(tree_group, 'tree')
for tree in tree_array:
# _LOG.debug('# pre-convert keys = {}'.format(tree.keys()))
t_t = self.convert_tree(tree)
if t_t is None:
continue
tid, tree_alias = t_t # pylint: disable=W0633
if tid in tree_id_set:
raise NexsonError('Repeated tree element id "{}"'.format(tid))
tree_id_set.add(tid)
# _LOG.debug('converting tree {} to by_id'.format(tid))
# _LOG.debug('# post-convert keys = {}'.format(tree.keys()))
assert tree_alias is tree
treeById[tid] = tree
treeElementOrder.append(tid)
treeContainingObjByTreesId[tree_group['@id']] = treeById
tree_group['^ot:treeElementOrder'] = treeElementOrder
# If all that succeeds, add the new object to the dict, creating a fat structure
nex['otusById'] = otusById
nex['^ot:otusElementOrder'] = otusElementOrder
nex['treesById'] = treesById
nex['^ot:treesElementOrder'] = treesElementOrder
for k, v in treeContainingObjByTreesId.items():
treesById[k]['treeById'] = v
nex['@nexml2json'] = str(BY_ID_HONEY_BADGERFISH)
# Make the struct leaner
if self.remove_old_structs:
del nex['otus']
del nex['trees']
for k, v in treesById.items():
if 'tree' in v:
del v['tree']
del v['@id']
return obj | 0.002938 |
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa))) | 0.013793 |
def rotateCoords(coords, R):
"""
Rotate the list of points using rotation matrix R
:param coords: List of points to be rotated
:param R: Rotation matrix
:return: List of rotated points
"""
newlist = list()
for pp in coords:
rpp = matrixTimesVector(R, pp)
newlist.append(rpp)
return newlist | 0.002933 |
def delete(self, symbol, chunk_range=None, audit=None):
"""
Delete all chunks for a symbol, or optionally, chunks within a range
Parameters
----------
symbol : str
symbol name for the item
chunk_range: range object
a date range to delete
audit: dict
dict to store in the audit log
"""
if chunk_range is not None:
sym = self._get_symbol_info(symbol)
# read out chunks that fall within the range and filter out
# data within the range
df = self.read(symbol, chunk_range=chunk_range, filter_data=False)
row_adjust = len(df)
if not df.empty:
df = CHUNKER_MAP[sym[CHUNKER]].exclude(df, chunk_range)
# remove chunks, and update any remaining data
query = {SYMBOL: symbol}
query.update(CHUNKER_MAP[sym[CHUNKER]].to_mongo(chunk_range))
self._collection.delete_many(query)
self._mdata.delete_many(query)
self.update(symbol, df)
# update symbol metadata (rows and chunk count)
sym = self._get_symbol_info(symbol)
sym[LEN] -= row_adjust
sym[CHUNK_COUNT] = mongo_count(self._collection, filter={SYMBOL: symbol})
self._symbols.replace_one({SYMBOL: symbol}, sym)
else:
query = {SYMBOL: symbol}
self._collection.delete_many(query)
self._symbols.delete_many(query)
self._mdata.delete_many(query)
if audit is not None:
audit['symbol'] = symbol
if chunk_range is not None:
audit['rows_deleted'] = row_adjust
audit['action'] = 'range delete'
else:
audit['action'] = 'symbol delete'
self._audit.insert_one(audit) | 0.001554 |
def infos(cls, fqdn):
""" Display information about hosted certificates for a fqdn. """
if isinstance(fqdn, (list, tuple)):
ids = []
for fqd_ in fqdn:
ids.extend(cls.infos(fqd_))
return ids
ids = cls.usable_id(fqdn)
if not ids:
return []
if not isinstance(ids, (list, tuple)):
ids = [ids]
return [cls.info(id_) for id_ in ids] | 0.004415 |
def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):
"""
add random effects term for single trait models (no trait-trait covariance matrix)
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
            normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions
"""
assert self.P == 1, 'Incompatible number of traits'
        assert K is not None or is_noise, 'Specify covariance structure'
        if is_noise:
            assert self.noisPos is None, 'noise term already exists'
            K = SP.eye(self.Nt)
            self.noisPos = self.n_terms
        else:
            assert K.shape[0] == self.Nt, 'Incompatible shape'
            assert K.shape[1] == self.Nt, 'Incompatible shape'
        if Ks is not None:
            assert Ks.shape[0] == self.N, 'Incompatible shape'
        if normalize:
            Norm = 1 / K.diagonal().mean()
            K *= Norm
            if Ks is not None:
                Ks *= Norm
        self.vd.addTerm(limix.CSingleTraitTerm(K))
        if Ks is not None:
            self.setKstar(self.n_terms, Ks)
        self.n_terms += 1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None | 0.023856 |
def set_logging_formatter(self):
"""
Sets the logging formatter.
"""
for handler in (RuntimeGlobals.logging_console_handler,
RuntimeGlobals.logging_file_handler,
RuntimeGlobals.logging_session_handler):
handler and handler.setFormatter(
RuntimeGlobals.logging_formatters[RuntimeGlobals.logging_active_formatter]) | 0.007126 |
def format(self, attrs, args=None):
"""Format attributes including {} tags with arguments."""
if args is None:
return attrs
out = {}
for key, val in attrs.items():
mba = {'indexer': 'annual'}
# Add formatting {} around values to be able to replace them with _attrs_mapping using format.
for k, v in args.items():
if isinstance(v, six.string_types) and v in self._attrs_mapping.get(key, {}).keys():
mba[k] = '{{{}}}'.format(v)
elif isinstance(v, dict):
if v:
dk, dv = v.copy().popitem()
if dk == 'month':
dv = 'm{}'.format(dv)
mba[k] = '{{{}}}'.format(dv)
else:
mba[k] = int(v) if (isinstance(v, float) and v % 1 == 0) else v
out[key] = val.format(**mba).format(**self._attrs_mapping.get(key, {}))
return out | 0.005894 |
def strip_accents(s, pass_symbols=(u'й', u'Й', u'\n')):
""" Strip accents from a string """
result = []
for char in s:
# Pass these symbols without processing
if char in pass_symbols:
result.append(char)
continue
for c in unicodedata.normalize('NFD', char):
if unicodedata.category(c) == 'Mn':
continue
result.append(c)
return ''.join(result) | 0.002242 |
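A small usage sketch with a made-up input (assumes the unicodedata import the function relies on); the Cyrillic й passes through untouched because NFD would otherwise decompose it into и plus a combining breve:

print(strip_accents(u'Señor café, привет й'))  # -> 'Senor cafe, привет й'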
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype) | 0.003472 |
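A conceptual NumPy-only illustration of 2x2 max-pooling; this reshape trick is not the imgaug pool() call itself, just an equivalent for evenly divisible shapes:

import numpy as np

arr = np.arange(16).reshape(4, 4)
pooled = arr.reshape(2, 2, 2, 2).max(axis=(1, 3))  # each 2x2 block collapses to its maximum
print(pooled)  # [[ 5  7]
               #  [13 15]]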
def Scharr_edge(im, blurRadius=10, imblur=None):
"""Extract the edges using Scharr kernel (Sobel optimized for rotation
invariance)
Parameters:
-----------
im: 2d array
The image
blurRadius: number, default 10
        The Gaussian blur radius (the kernel has size 2*blurRadius+1)
    imblur: 2d array, OUT
        If not None, will be filled with the blurred image
Returns:
--------
out: 2d array
The edges of the images computed with the Scharr algorithm
"""
im = np.asarray(im, dtype='float32')
blurRadius = 2 * blurRadius + 1
im = cv2.GaussianBlur(im, (blurRadius, blurRadius), 0)
Gx = cv2.Scharr(im, -1, 0, 1)
Gy = cv2.Scharr(im, -1, 1, 0)
ret = cv2.magnitude(Gx, Gy)
if imblur is not None and imblur.shape == im.shape:
imblur[:, :] = im
return ret | 0.001182 |
def get_gene(self, gene_name=None, gene_symbol=None, gene_id=None, synonym=None, uniprot_id=None,
pharmgkb_id=None, biogrid_id=None, alt_gene_id=None, limit=None, as_df=False):
"""Get genes
:param bool as_df: if set to True result returns as `pandas.DataFrame`
        :param alt_gene_id: alternative gene identifier
:param str gene_name: gene name
:param str gene_symbol: HGNC gene symbol
:param int gene_id: NCBI Entrez Gene identifier
:param str synonym: Synonym
:param str uniprot_id: UniProt primary accession number
:param str pharmgkb_id: PharmGKB identifier
:param int biogrid_id: BioGRID identifier
:param int limit: maximum of results
:rtype: list[models.Gene]
"""
q = self.session.query(models.Gene)
if gene_symbol:
q = q.filter(models.Gene.gene_symbol.like(gene_symbol))
if gene_name:
q = q.filter(models.Gene.gene_name.like(gene_name))
if gene_id:
q = q.filter(models.Gene.gene_id.like(gene_id))
if synonym:
q = q.join(models.GeneSynonym).filter(models.GeneSynonym.synonym == synonym)
if uniprot_id:
q = q.join(models.GeneUniprot).filter(models.GeneUniprot.uniprot_id == uniprot_id)
if pharmgkb_id:
q = q.join(models.GenePharmgkb).filter(models.GenePharmgkb.pharmgkb_id == pharmgkb_id)
if biogrid_id:
q = q.join(models.GeneBiogrid).filter(models.GeneBiogrid.biogrid_id == biogrid_id)
if alt_gene_id:
            q = q.join(models.GeneAltGeneId).filter(models.GeneAltGeneId.alt_gene_id == alt_gene_id)
return self._limit_and_df(q, limit, as_df) | 0.007105 |
def SensorsDataGet(self, sensorIds, parameters):
"""
Retrieve sensor data for the specified sensors from CommonSense.
If SensorsDataGet is successful, the result can be obtained by a call to getResponse(), and should be a json string.
@param sensorIds (list) a list of sensor ids to retrieve the data for
@param parameters (dictionary) - Dictionary containing the parameters for the api call.
@return (bool) - Boolean indicating whether SensorsDataGet was successful.
"""
if parameters is None:
parameters = {}
parameters["sensor_id[]"] = sensorIds
if self.__SenseApiCall__('/sensors/data.json', 'GET', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | 0.012458 |
def dtype_repr(dtype):
"""Stringify ``dtype`` for ``repr`` with default for int and float."""
dtype = np.dtype(dtype)
if dtype == np.dtype(int):
return "'int'"
elif dtype == np.dtype(float):
return "'float'"
elif dtype == np.dtype(complex):
return "'complex'"
elif dtype.shape:
return "('{}', {})".format(dtype.base, dtype.shape)
else:
return "'{}'".format(dtype) | 0.00232 |
def process_udp_frame(self,
id=None,
msg=None):
"""process_udp_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: udp frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "udp_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.udp_keys:
self.udp_keys[new_key] = k
# end of capturing all unique keys
dt["udp_id"] = id
self.all_udp.append(dt)
log.debug("UDP data updated:")
log.debug(self.udp_keys)
log.debug(self.all_udp)
log.debug("")
return flat_msg | 0.004061 |
def create_parameter_group(self, name, engine='MySQL5.1', description=''):
"""
Create a new dbparameter group for your account.
:type name: string
:param name: The name of the new dbparameter group
:type engine: str
:param engine: Name of database engine.
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
params = {'DBParameterGroupName': name,
'DBParameterGroupFamily': engine,
'Description' : description}
return self.get_object('CreateDBParameterGroup', params, ParameterGroup) | 0.005208 |
def diff_in_days(start, end):
"""
calculate difference between given dates in days
    :param BaseDateTuple start: start date
:param BaseDateTuple end: end date
:return float: difference between end date and start date in days
"""
diff = from_ymd_to_excel(*end.date)-from_ymd_to_excel(*start.date)
return float(diff) | 0.005291 |
def codemirror_script(self, inputid):
"""
Build CodeMirror HTML script tag which contains CodeMirror init.
Arguments:
inputid (string): Input id.
Returns:
string: HTML for field CodeMirror instance.
"""
varname = "{}_codemirror".format(inputid)
html = self.get_codemirror_field_js()
opts = self.codemirror_config()
return html.format(varname=varname, inputid=inputid,
settings=json.dumps(opts, sort_keys=True)) | 0.003731 |
def swap_channels(self, channel_swap):
""" Swaps the two channels specified in the tuple.
Parameters
----------
channel_swap : :obj:`tuple` of int
the two channels to swap
Returns
-------
:obj:`ColorImage`
            color image with the specified channels swapped
"""
if len(channel_swap) != 2:
raise ValueError('Illegal value for channel swap')
ci = channel_swap[0]
cj = channel_swap[1]
if ci < 0 or ci > 2 or cj < 0 or cj > 2:
            raise ValueError('Channels must be between 0 and 2')
new_data = self.data.copy()
new_data[:, :, ci] = self.data[:, :, cj]
new_data[:, :, cj] = self.data[:, :, ci]
return ColorImage(new_data, frame=self._frame) | 0.002538 |
def get_common_session_key(self, premaster_secret):
"""K = H(S).
Special implementation for Apple TV.
"""
k_1 = self.hash(premaster_secret, b'\x00\x00\x00\x00', as_bytes=True)
k_2 = self.hash(premaster_secret, b'\x00\x00\x00\x01', as_bytes=True)
return k_1 + k_2 | 0.006431 |
def _load(self, scale=1.0):
"""Load the MetImage RSR data for the band requested"""
data = np.genfromtxt(self.requested_band_filename,
unpack=True,
names=['wavenumber',
'response'],
skip_header=4)
# Data are wavenumbers in cm-1:
wavelength = 1. / data['wavenumber'] * 10000.
response = data['response']
# The real MetImage has 24 detectors. However, for now we store the
# single rsr as 'detector-1', indicating that there will be multiple
# detectors in the future:
detectors = {}
detectors['det-1'] = {'wavelength': wavelength, 'response': response}
self.rsr = detectors | 0.002548 |
def are_imaging_dicoms(dicom_input):
"""
This function will check the dicom headers to see which type of series it is
Possibilities are fMRI, DTI, Anatomical (if no clear type is found anatomical is used)
:param dicom_input: directory with dicom files or a list of dicom objects
"""
# if it is philips and multiframe dicom then we assume it is ok
if common.is_philips(dicom_input):
if common.is_multiframe_dicom(dicom_input):
return True
# for all others if there is image position patient we assume it is ok
header = dicom_input[0]
return Tag(0x0020, 0x0037) in header | 0.004747 |
def prepare_dataset(dataset,
formula_id2index,
feature_list,
is_traindata,
do_normalization=False):
"""Transform each instance of dataset to a (Features, Label) tuple."""
prepared = []
start_time = time.time()
translation = []
for i, data in enumerate(dataset):
        handwriting = data['handwriting']
        x = handwriting.feature_extraction(feature_list)  # Feature selection
y = formula_id2index[data['formula_id']] # Get label
translation.append((handwriting.raw_data_id,
handwriting.formula_in_latex,
handwriting.formula_id))
prepared.append((numpy.array(x), y))
if i % 100 == 0 and i > 0:
utils.print_status(len(dataset), i, start_time)
sys.stdout.write("\r100%" + " "*80 + "\n")
sys.stdout.flush()
# Feature normalization
if do_normalization:
_normalize_features(feature_list, prepared, is_traindata)
return (prepared, translation) | 0.00092 |
def get_commits(self):
'''
Get all commits involving this filename
:returns: List of commits newest to oldest
'''
if not self.is_managed_by_git():
return []
return self.git.get_commits(self.content.source_path, self.follow) | 0.007067 |
def parse_all(self):
"""Parse the __all__ definition in a module."""
assert self.current.value == "__all__"
self.consume(tk.NAME)
if self.current.value != "=":
raise AllError("Could not evaluate contents of __all__. ")
self.consume(tk.OP)
if self.current.value not in "([":
raise AllError("Could not evaluate contents of __all__. ")
self.consume(tk.OP)
self.all = []
all_content = "("
while self.current.kind != tk.OP or self.current.value not in ")]":
if self.current.kind in (tk.NL, tk.COMMENT):
pass
elif self.current.kind == tk.STRING or self.current.value == ",":
all_content += self.current.value
else:
raise AllError(
"Unexpected token kind in __all__: {!r}. ".format(
self.current.kind
)
)
self.stream.move()
self.consume(tk.OP)
all_content += ")"
try:
self.all = eval(all_content, {})
except BaseException as e:
raise AllError(
"Could not evaluate contents of __all__."
"\bThe value was {}. The exception was:\n{}".format(all_content, e)
) | 0.002256 |
def _calculate_states(self, solution, t, step, int_step):
"""!
@brief Calculates new states for neurons using differential calculus. Returns new states for neurons.
@param[in] solution (solve_type): Type solver of the differential equation.
@param[in] t (double): Current time of simulation.
@param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.
@param[in] int_step (double): Step differentiation that is used for solving differential equation.
@return (list) New states for neurons.
"""
        next_states = [0] * self._num_osc
        for index in range(self._num_osc):
            result = odeint(self._neuron_states, self._states[index], numpy.arange(t - step, t, int_step), (index,))
            next_states[index] = result[-1][0]
        self._outputs = list(self._outputs_buffer)
        return next_states | 0.019905 |
def lraise(self,message):
"""log an exception, close the log file, then raise the exception
Parameters
----------
message : str
the exception message
Raises
------
exception with message
"""
s = str(datetime.now()) + " ERROR: " + message + '\n'
print(s,end='')
if self.filename:
self.f.write(s)
            self.f.flush()
self.f.close()
raise Exception(message) | 0.008048 |
def weather(api_key, latitude, longitude, date_time=None):
# type:(str, float, float) -> Weather
"""
This is a shortcut method that can be used to perform a basic weather request with the default settings.
:param str api_key: Darksky.net API key
:param float latitude: The requested latitude. Maybe different from the value returned from an API request
:param float longitude: The requested longitude. Maybe different from the value returned from an API request
:param date_time: The requested date/time.
:rtype: Weather
"""
return DarkSky(api_key).weather(latitude, longitude, date_time) | 0.006349 |
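A hedged usage sketch; the API key and coordinates below are placeholders, and a valid Darksky.net key is required for a real call:

w = weather('0123456789abcdef0123456789abcdef', 42.3601, -71.0589)  # returns a Weather object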
def default(self, vid):
""" Defaults the VLAN configuration
.. code-block:: none
default vlan <vlanid>
Args:
vid (str): The VLAN ID to default
Returns:
True if the operation was successful otherwise False
"""
command = 'default vlan %s' % vid
return self.configure(command) if isvlan(vid) else False | 0.005063 |
def from_api_repr(cls, resource, client):
"""Factory: construct a job given its API representation
        .. note::
This method assumes that the project found in the resource matches
the client's project.
:type resource: dict
:param resource: dataset job representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.job.LoadJob`
:returns: Job parsed from ``resource``.
"""
config_resource = resource.get("configuration", {})
config = LoadJobConfig.from_api_repr(config_resource)
# A load job requires a destination table.
dest_config = config_resource["load"]["destinationTable"]
ds_ref = DatasetReference(dest_config["projectId"], dest_config["datasetId"])
destination = TableReference(ds_ref, dest_config["tableId"])
# sourceUris will be absent if this is a file upload.
source_uris = _helpers._get_sub_prop(config_resource, ["load", "sourceUris"])
job_ref = _JobReference._from_api_repr(resource["jobReference"])
job = cls(job_ref, source_uris, destination, client, config)
job._set_properties(resource)
return job | 0.002878 |
def _create_new_thread_loop(self):
"""
Create a daemonized thread that will run Tornado IOLoop.
:return: the IOLoop backed by the new thread.
"""
self._thread_loop = ThreadLoop()
if not self._thread_loop.is_ready():
self._thread_loop.start()
return self._thread_loop._io_loop | 0.005831 |
def create_filebase_name(self, group_info, extension='gz', file_name=None):
"""
Return tuple of resolved destination folder name and file name
"""
dirname = self.filebase.formatted_dirname(groups=group_info)
if not file_name:
file_name = self.filebase.prefix_template + '.' + extension
return dirname, file_name | 0.005391 |
def manipulate(self, stored_instance, component_instance):
"""
Manipulates the component instance
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
"""
# Store the stored instance
self._ipopo_instance = stored_instance
if self.__controller is None:
# No controller: do nothing
return
# Get the current value of the member (True by default)
controller_value = getattr(component_instance, self.__controller, True)
# Store the controller value
stored_instance.set_controller_state(
self.__controller, controller_value
)
# Prepare the methods names
getter_name = "{0}{1}".format(
ipopo_constants.IPOPO_CONTROLLER_PREFIX,
ipopo_constants.IPOPO_GETTER_SUFFIX,
)
setter_name = "{0}{1}".format(
ipopo_constants.IPOPO_CONTROLLER_PREFIX,
ipopo_constants.IPOPO_SETTER_SUFFIX,
)
# Inject the getter and setter at the instance level
getter, setter = self._field_controller_generator()
setattr(component_instance, getter_name, getter)
setattr(component_instance, setter_name, setter) | 0.001546 |
def get_parameter_vector(self, include_frozen=False):
"""
Get an array of the parameter values in the correct order
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
"""
if include_frozen:
return self.parameter_vector
return self.parameter_vector[self.unfrozen_mask] | 0.004651 |
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors) | 0.004202 |
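A Python 2 style usage sketch (the function relies on the py2 unicode builtin); inputs are made up:

any2unicode(b'caf\xc3\xa9')  # UTF-8 bytes -> u'café'
any2unicode(u'café')         # already unicode, returned unchanged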
def make_inaturalist_api_get_call(endpoint: str, params: Dict, **kwargs) -> requests.Response:
"""Make an API call to iNaturalist.
endpoint is a string such as 'observations' !! do not put / in front
method: 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', 'DELETE'
kwargs are passed to requests.request
Returns a requests.Response object
"""
headers = {'Accept': 'application/json'}
response = requests.get(urljoin(INAT_NODE_API_BASE_URL, endpoint), params, headers=headers, **kwargs)
return response | 0.005671 |
def example_protos_from_path(path,
num_examples=10,
start_index=0,
parse_examples=True,
sampling_odds=1,
example_class=tf.train.Example):
"""Returns a number of examples from the provided path.
Args:
path: A string path to the examples.
num_examples: The maximum number of examples to return from the path.
parse_examples: If true then parses the serialized proto from the path into
proto objects. Defaults to True.
sampling_odds: Odds of loading an example, used for sampling. When >= 1
(the default), then all examples are loaded.
example_class: tf.train.Example or tf.train.SequenceExample class to load.
Defaults to tf.train.Example.
Returns:
A list of Example protos or serialized proto strings at the path.
Raises:
InvalidUserInputError: If examples cannot be procured from the path.
"""
def append_examples_from_iterable(iterable, examples):
for value in iterable:
if sampling_odds >= 1 or random.random() < sampling_odds:
examples.append(
example_class.FromString(value) if parse_examples else value)
if len(examples) >= num_examples:
return
examples = []
if path.endswith('.csv'):
def are_floats(values):
for value in values:
try:
float(value)
except ValueError:
return False
return True
csv.register_dialect('CsvDialect', skipinitialspace=True)
rows = csv.DictReader(open(path), dialect='CsvDialect')
for row in rows:
if sampling_odds < 1 and random.random() > sampling_odds:
continue
example = tf.train.Example()
for col in row.keys():
# Parse out individual values from vertical-bar-delimited lists
values = [val.strip() for val in row[col].split('|')]
if are_floats(values):
example.features.feature[col].float_list.value.extend(
[float(val) for val in values])
else:
example.features.feature[col].bytes_list.value.extend(
[val.encode('utf-8') for val in values])
examples.append(
example if parse_examples else example.SerializeToString())
if len(examples) >= num_examples:
break
return examples
filenames = filepath_to_filepath_list(path)
compression_types = [
'', # no compression (distinct from `None`!)
'GZIP',
'ZLIB',
]
current_compression_idx = 0
current_file_index = 0
while (current_file_index < len(filenames) and
current_compression_idx < len(compression_types)):
try:
record_iterator = tf.compat.v1.python_io.tf_record_iterator(
path=filenames[current_file_index],
options=tf.io.TFRecordOptions(
compression_types[current_compression_idx]))
append_examples_from_iterable(record_iterator, examples)
current_file_index += 1
if len(examples) >= num_examples:
break
except tf.errors.DataLossError:
current_compression_idx += 1
except (IOError, tf.errors.NotFoundError) as e:
raise common_utils.InvalidUserInputError(e)
if examples:
return examples
else:
raise common_utils.InvalidUserInputError(
'No examples found at ' + path +
'. Valid formats are TFRecord files.') | 0.009624 |
def XOR(a, b, exc=CertifierValueError('Expected at least one certified value')):
"""
    Exactly one of the two certifiers must succeed (i.e. not raise a
    CertifierError) when called.
    Raise the specified exception on failure.
    :param Certifier a:
        The first certifier to call
    :param Certifier b:
        The second certifier to call
:param Exception exc:
Callable that is raised if XOR fails.
"""
errors = []
for certifier in [a, b]:
try:
certifier()
except CertifierError as e:
errors.append(e)
if len(errors) != 1:
if exc is not None:
raise exc | 0.00313 |