text (string, 78 to 104k chars) | score (float64, 0 to 0.18)
---|---
def build_napp_package(napp_name):
    """Build the .napp file to be sent to the napps server.
    Args:
        napp_name (str): The NApp name. NApp identifiers are
            formatted as <username>/<napp_name>
    Return:
        file_payload (binary): The binary representation of the napp
            package that will be POSTed to the napp server.
    """
    ignored_extensions = ['.swp', '.pyc', '.napp']
    ignored_dirs = ['__pycache__', '.git', '.tox']
    # Build a filtered list instead of removing items from the list being
    # iterated over, which would skip entries. Compare extensions with their
    # leading dot so they actually match the ignored list.
    files = [f for f in os.listdir()
             if not (os.path.isfile(f)
                     and os.path.splitext(f)[1] in ignored_extensions)
             and not (os.path.isdir(f) and f in ignored_dirs)]
    # Create the '.napp' package
    napp_file = tarfile.open(napp_name + '.napp', 'x:xz')
    for local_f in files:
        napp_file.add(local_f)
    napp_file.close()
    # Get the binary payload of the package
    file_payload = open(napp_name + '.napp', 'rb')
    # Remove the created package from the filesystem
    os.remove(napp_name + '.napp')
    return file_payload | 0.001595 |
def validate(self, expected_type, is_array, val):
"""
Validates that the expected type matches the value
Returns two element tuple: (bool, string)
- `bool` - True if valid, False if not
- `string` - Description of validation error, or None if valid
:Parameters:
expected_type
string name of the type expected. This may be a Barrister primitive, or a user defined type.
is_array
If True then require that the val be a list
val
Value to validate against the expected type
"""
if val == None:
if expected_type.optional:
return True, None
else:
return False, "Value cannot be null"
elif is_array:
if not isinstance(val, list):
return self._type_err(val, "list")
else:
for v in val:
ok, msg = self.validate(expected_type, False, v)
if not ok:
return ok, msg
elif expected_type.type == "int":
if not isinstance(val, (long, int)):
return self._type_err(val, "int")
elif expected_type.type == "float":
if not isinstance(val, (float, int, long)):
return self._type_err(val, "float")
elif expected_type.type == "bool":
if not isinstance(val, bool):
return self._type_err(val, "bool")
elif expected_type.type == "string":
if not isinstance(val, (str, unicode)):
return self._type_err(val, "string")
else:
return self.get(expected_type.type).validate(val)
return True, None | 0.002286 |
def delete_processing_block(processing_block_id):
"""Delete Processing Block with the specified ID"""
scheduling_block_id = processing_block_id.split(':')[0]
config = get_scheduling_block(scheduling_block_id)
processing_blocks = config.get('processing_blocks')
processing_block = list(filter(
lambda x: x.get('id') == processing_block_id, processing_blocks))[0]
config['processing_blocks'].remove(processing_block)
DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config))
    # Add an event to the processing block events list to notify
    # subscribers that a processing block was deleted from the db.
DB.rpush('processing_block_events',
json.dumps(dict(type="deleted", id=processing_block_id))) | 0.001332 |
def _register_if_needed(cls, run_object):
"""Registers Trainable or Function at runtime.
Assumes already registered if run_object is a string. Does not
register lambdas because they could be part of variant generation.
Also, does not inspect interface of given run_object.
Arguments:
run_object (str|function|class): Trainable to run. If string,
assumes it is an ID and does not modify it. Otherwise,
returns a string corresponding to the run_object name.
Returns:
A string representing the trainable identifier.
"""
if isinstance(run_object, six.string_types):
return run_object
elif isinstance(run_object, types.FunctionType):
if run_object.__name__ == "<lambda>":
logger.warning(
"Not auto-registering lambdas - resolving as variant.")
return run_object
else:
name = run_object.__name__
register_trainable(name, run_object)
return name
elif isinstance(run_object, type):
name = run_object.__name__
register_trainable(name, run_object)
return name
else:
raise TuneError("Improper 'run' - not string nor trainable.") | 0.001483 |
def set_fft_params(func):
"""Decorate a method to automatically convert quantities to samples
"""
@wraps(func)
def wrapped_func(series, method_func, *args, **kwargs):
"""Wrap function to normalize FFT params before execution
"""
if isinstance(series, tuple):
data = series[0]
else:
data = series
        # normalise FFT parameters for all libraries
normalize_fft_params(data, kwargs=kwargs, func=method_func)
return func(series, method_func, *args, **kwargs)
return wrapped_func | 0.001748 |
def error(self, i: int=None) -> str:
"""
Returns an error message
"""
head = "[" + colors.red("error") + "]"
if i is not None:
head = str(i) + " " + head
return head | 0.017778 |
def segments(self, index=None, params=None):
"""
The segments command is the detailed view of Lucene segments per index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'segments', index), params=params) | 0.00409 |
def _process_stocks(self, limit):
"""
Stock definitions.
Here we instantiate them as instances of the given taxon.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'stock'))
LOG.info("building labels for stocks")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
(stock_id, dbxref_id, organism_id, name, uniquename,
description, type_id, is_obsolete) = line
# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670
stock_num = stock_id
stock_id = 'FlyBase:'+uniquename
self.idhash['stock'][stock_num] = stock_id
stock_label = description
organism_key = organism_id
taxon = self.idhash['organism'][organism_key]
# from what i can tell, the dbxrefs are just more FBst,
# so no added information vs uniquename
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
if self.test_mode \
and int(stock_num) not in self.test_keys['strain']:
continue
# tax_label = self.label_hash[taxon] # unused
# add the tax in case it hasn't been already
model.addClassToGraph(taxon)
model.addIndividualToGraph(stock_id, stock_label, taxon)
if is_obsolete == 't':
model.addDeprecatedIndividual(stock_id)
return | 0.001998 |
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
"""
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
# proposed for <port> since it may be a mac address which will differ
    # across units, thus allowing first-known-good to be chosen.
_mappings = parse_mappings(mappings, key_rvalue=True)
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {mappings.split()[0]: default_bridge}
ports = _mappings.keys()
if len(set(ports)) != len(ports):
raise Exception("It is not allowed to have the same port configured "
"on more than one bridge")
return _mappings | 0.000981 |
def fqwe(fEM, time, freq, qweargs):
r"""Fourier Transform using Quadrature-With-Extrapolation.
It follows the QWE methodology [Key12]_ for the Hankel transform, see
``hqwe`` for more information.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
This function is based on ``get_CSEM1D_TD_QWE.m`` from the source code
distributed with [Key12]_.
``fqwe`` checks how steep the decay of the frequency-domain result is, and
calls QUAD for the very steep interval, for which QWE is not suited.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
If true, QWE/QUAD converged. If not, <ftarg> might have to be adjusted.
"""
# Get rtol, atol, nquad, maxint, diff_quad, a, b, and limit
rtol, atol, nquad, maxint, _, diff_quad, a, b, limit, sincos = qweargs
    # Calculate quadrature intervals for all offsets
xint = np.concatenate((np.array([1e-20]), np.arange(1, maxint+1)*np.pi))
if sincos == np.cos: # Adjust zero-crossings if cosine-transform
xint[1:] -= np.pi/2
intervals = xint/time[:, None]
# Get Gauss Quadrature Weights
g_x, g_w = special.p_roots(nquad)
# Pre-compute the Bessel functions at fixed quadrature points, multiplied
# by the corresponding Gauss quadrature weight.
dx = np.repeat(np.diff(xint)/2, nquad)
Bx = dx*(np.tile(g_x, maxint) + 1) + np.repeat(xint[:-1], nquad)
SS = sincos(Bx)*np.tile(g_w, maxint)
# Interpolate in frequency domain
tEM_rint = iuSpline(np.log(2*np.pi*freq), fEM.real)
tEM_iint = iuSpline(np.log(2*np.pi*freq), -fEM.imag)
# Check if we use QWE or SciPy's QUAD
# If there are any steep decays within an interval we have to use QUAD, as
# QWE is not designed for these intervals.
check0 = np.log(intervals[:, :-1])
check1 = np.log(intervals[:, 1:])
doqwe = np.all((np.abs(tEM_rint(check0) + 1j*tEM_iint(check0)) /
np.abs(tEM_rint(check1) + 1j*tEM_iint(check1)) < diff_quad),
1)
# Choose imaginary part if sine-transform, else real part
if sincos == np.sin:
tEM_int = tEM_iint
else:
tEM_int = tEM_rint
# Set quadargs if not given:
if not limit:
limit = maxint
if not a:
a = intervals[:, 0]
else:
a = a*np.ones(time.shape)
if not b:
b = intervals[:, -1]
else:
b = b*np.ones(time.shape)
# Pre-allocate output array
tEM = np.zeros(time.size)
conv = True
# Carry out SciPy's Quad if required
if np.any(~doqwe):
def sEMquad(w, t):
r"""Return scaled, interpolated value of tEM_int for ``w``."""
return tEM_int(np.log(w))*sincos(w*t)
# Loop over times that require QUAD
for i in np.where(~doqwe)[0]:
out = integrate.quad(sEMquad, a[i], b[i], (time[i],), 1, atol,
rtol, limit)
tEM[i] = out[0]
# If there is a fourth output from QUAD, it means it did not conv.
if len(out) > 3:
conv *= False
# Carry out QWE for 'well-behaved' intervals
if np.any(doqwe):
sEM = tEM_int(np.log(Bx/time[doqwe, None]))*SS
tEM[doqwe], _, tc = qwe(rtol, atol, maxint, sEM, intervals[doqwe, :])
conv *= tc
return tEM, conv | 0.000285 |
def find_element_by_jquery(step, browser, selector):
"""Find a single HTML element using jQuery-style selectors."""
elements = find_elements_by_jquery(browser, selector)
assert_true(step, len(elements) > 0)
return elements[0] | 0.004149 |
def check_readable(self, timeout):
"""
Poll ``self.stdout`` and return True if it is readable.
    :param float timeout: seconds to wait for I/O
:return: True if readable, else False
:rtype: boolean
"""
rlist, wlist, xlist = select.select([self._stdout], [], [], timeout)
return bool(len(rlist)) | 0.005682 |
def find_melody(file='440_480_clean.wav', chunksize=512):
"""Cut the sample into chunks and analyze each chunk.
Return a list [(Note, chunks)] where chunks is the number of chunks
where that note is the most dominant.
    If two consecutive chunks turn out to return the same Note they are
grouped together.
This is an experimental function.
"""
(data, freq, bits) = data_from_file(file)
res = []
for d in analyze_chunks(data, freq, bits, chunksize):
if res != []:
if res[-1][0] == d:
val = res[-1][1]
res[-1] = (d, val + 1)
else:
res.append((d, 1))
else:
res.append((d, 1))
return [(x, freq) for (x, freq) in res] | 0.001323 |
def diff(section):
"""
    For each section defined in the local config file, look for a folder inside the local config folder
    named after the section. Shows the differences between the local environment file, named as in the
    S3CONF variable for this section, and the file at the remote S3CONF path.
"""
try:
settings = config.Settings(section=section)
storage = STORAGES['s3'](settings=settings)
conf = s3conf.S3Conf(storage=storage, settings=settings)
local_root = os.path.join(config.LOCAL_CONFIG_FOLDER, section)
click.echo(''.join(conf.diff(local_root)))
except exceptions.EnvfilePathNotDefinedError:
raise exceptions.EnvfilePathNotDefinedUsageError() | 0.00436 |
def _make_read_lob_request(self, readoffset, readlength):
"""Make low level request to HANA database (READLOBREQUEST).
Compose request message with proper parameters and read lob data from second part object of reply.
"""
self._connection._check_closed()
request = RequestMessage.new(
self._connection,
RequestSegment(
message_types.READLOB,
(ReadLobRequest(self._lob_header.locator_id, readoffset, readlength),)
)
)
response = self._connection.send_request(request)
# The segment of the message contains two parts.
# 1) StatementContext -> ignored for now
# 2) ReadLobReply -> contains some header information and actual LOB data
data_part = response.segments[0].parts[1]
# return actual lob container (BytesIO/TextIO):
return data_part.data | 0.005459 |
def topics(text):
"""Return a list of topics."""
detectors = [
detector_50_Cent
]
ts = []
for detector in detectors:
ts.append(detector(text))
return [t[0] for t in ts if t[1] > 0.95] | 0.004464 |
def get_transitions_for(brain_or_object):
"""List available workflow transitions for all workflows
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: All possible available and allowed transitions
:rtype: list[dict]
"""
workflow = get_tool('portal_workflow')
transitions = []
instance = get_object(brain_or_object)
for wfid in get_workflows_for(brain_or_object):
wf = workflow[wfid]
tlist = wf.getTransitionsFor(instance)
transitions.extend([t for t in tlist if t not in transitions])
return transitions | 0.001499 |
def dim_iter(self, *dim_strides, **kwargs):
"""
Recursively iterate over the (dimension, stride)
tuples specified in dim_strides, returning a tuple
of dictionaries describing a dimension update.
For example, the following call effectively produces
2 loops over the 'ntime' and 'nchan' dimensions
in chunks of 10 and 4 respectively.
.. code-block:: python
for d in cube.dim_iter(('ntime', 10), ('nchan', 4))
cube.update_dimensions(d)
Parameters
----------
    *dim_strides: list
list of (dimension, stride) tuples
Returns
-------
iterator
Iterator produces dictionaries describing dimensions updates.
:code:`{'name':'ntime', 'lower_extent': 100, 'upper_extent': 200 }`
"""
# Extract dimension names
dims = [ds[0] for ds in dim_strides]
def _create_dim_dicts(*args):
return tuple({ 'name': d,
'lower_extent': s,
'upper_extent': e
} for (d, (s, e)) in args)
# Return a tuple-dict-creating generator
return (_create_dim_dicts(*zip(dims, s)) for s
in self.endpoint_iter(*dim_strides, **kwargs)) | 0.005397 |
def save(self):
"""
Save current state of config dictionary.
"""
with open(self.config_file, "w") as f:
f.write(dump(self.config, default_flow_style=False)) | 0.014925 |
def copy_file(aws_access_key_id, aws_secret_access_key, bucket_name, file, s3_folder):
"""
copies file to bucket s3_folder
"""
# Connect to the bucket
bucket = s3_bucket(aws_access_key_id, aws_secret_access_key, bucket_name)
key = boto.s3.key.Key(bucket)
if s3_folder:
target_name = '%s/%s' % (s3_folder, os.path.basename(file))
else:
target_name = os.path.basename(file)
key.key = target_name
print('Uploading %s to %s' % (file, target_name))
key.set_contents_from_filename(file)
print('Upload %s FINISHED: %s' % (file, dt.now())) | 0.003333 |
def duplicate(self, new_parent=None):
"Create a new object exactly similar to self"
kwargs = {}
for spec_name, spec in self._meta.specs.items():
value = getattr(self, spec_name)
if isinstance(value, Color):
            print("COLOR", value, value.default)
if value.default:
value = None
if value is not None:
kwargs[spec_name] = value
del kwargs['parent']
new_id = wx.NewId()
kwargs['id'] = new_id
kwargs['name'] = "%s_%s" % (kwargs['name'], new_id)
new_obj = self.__class__(new_parent or self.get_parent(), **kwargs)
# recursively create a copy of each child (in the new parent!)
for child in self:
child.duplicate(new_obj)
return new_obj | 0.003529 |
def get_timeout(self):
"""
Checks if any timeout for the requests to DigitalOcean is required.
To set a timeout, use the REQUEST_TIMEOUT_ENV_VAR environment
variable.
"""
timeout_str = os.environ.get(REQUEST_TIMEOUT_ENV_VAR)
if timeout_str:
try:
return float(timeout_str)
except:
self._log.error('Failed parsing the request read timeout of '
'"%s". Please use a valid float number!' %
timeout_str)
return None | 0.006525 |
def get_turn_delta(self, branch=None, turn=None, tick_from=0, tick_to=None):
"""Get a dictionary describing changes made on a given turn.
If ``tick_to`` is not supplied, report all changes after ``tick_from``
(default 0).
The keys are graph names. Their values are dictionaries of the graphs'
attributes' new values, with ``None`` for deleted keys. Also in those graph
dictionaries are special keys 'node_val' and 'edge_val' describing changes
to node and edge attributes, and 'nodes' and 'edges' full of booleans
indicating whether a node or edge exists.
    :arg branch: A branch of history; defaults to the current branch
    :arg turn: The turn in the branch; defaults to the current turn
    :arg tick_from: Starting tick; defaults to 0
    :arg tick_to: Ending tick; defaults to the current tick
"""
branch = branch or self.branch
turn = turn or self.turn
tick_to = tick_to or self.tick
delta = {}
if tick_from < tick_to:
gvbranches = self._graph_val_cache.settings
nbranches = self._nodes_cache.settings
nvbranches = self._node_val_cache.settings
ebranches = self._edges_cache.settings
evbranches = self._edge_val_cache.settings
else:
gvbranches = self._graph_val_cache.presettings
nbranches = self._nodes_cache.presettings
nvbranches = self._node_val_cache.presettings
ebranches = self._edges_cache.presettings
evbranches = self._edge_val_cache.presettings
if branch in gvbranches and turn in gvbranches[branch]:
for graph, key, value in gvbranches[branch][turn][tick_from:tick_to]:
if graph in delta:
delta[graph][key] = value
else:
delta[graph] = {key: value}
if branch in nbranches and turn in nbranches[branch]:
for graph, node, exists in nbranches[branch][turn][tick_from:tick_to]:
delta.setdefault(graph, {}).setdefault('nodes', {})[node] = bool(exists)
if branch in nvbranches and turn in nvbranches[branch]:
for graph, node, key, value in nvbranches[branch][turn][tick_from:tick_to]:
if (
graph in delta and 'nodes' in delta[graph] and
node in delta[graph]['nodes'] and not delta[graph]['nodes'][node]
):
continue
nodevd = delta.setdefault(graph, {}).setdefault('node_val', {})
if node in nodevd:
nodevd[node][key] = value
else:
nodevd[node] = {key: value}
graph_objs = self._graph_objs
if branch in ebranches and turn in ebranches[branch]:
for graph, orig, dest, idx, exists in ebranches[branch][turn][tick_from:tick_to]:
if graph_objs[graph].is_multigraph():
if (
graph in delta and 'edges' in delta[graph] and
orig in delta[graph]['edges'] and dest in delta[graph]['edges'][orig]
and idx in delta[graph]['edges'][orig][dest]
and not delta[graph]['edges'][orig][dest][idx]
):
continue
delta.setdefault(graph, {}).setdefault('edges', {})\
.setdefault(orig, {}).setdefault(dest, {})[idx] = bool(exists)
else:
if (
graph in delta and 'edges' in delta[graph] and
orig in delta[graph]['edges'] and dest in delta[graph]['edges'][orig]
and not delta[graph]['edges'][orig][dest]
):
continue
delta.setdefault(graph, {}).setdefault('edges', {})\
.setdefault(orig, {})[dest] = bool(exists)
if branch in evbranches and turn in evbranches[branch]:
for graph, orig, dest, idx, key, value in evbranches[branch][turn][tick_from:tick_to]:
edgevd = delta.setdefault(graph, {}).setdefault('edge_val', {})\
.setdefault(orig, {}).setdefault(dest, {})
if graph_objs[graph].is_multigraph():
if idx in edgevd:
edgevd[idx][key] = value
else:
edgevd[idx] = {key: value}
else:
edgevd[key] = value
return delta | 0.003277 |
def add_field(self, model, field):
    """Run when a field is added to a model."""
super(SchemaEditor, self).add_field(model, field)
for mixin in self.post_processing_mixins:
mixin.add_field(model, field) | 0.008403 |
async def set_version(self, tp, params, version=None, elem=None):
"""
Stores version to the stream if not stored yet
:param tp:
:param params:
:param version:
:param elem:
:return:
"""
self.registry.set_tr(None)
tw = TypeWrapper(tp, params)
if not tw.is_versioned():
return TypeWrapper.ELEMENTARY_RES
# If not in the DB, store to the archive at the current position
if not self.version_db.is_versioned(tw):
if version is None:
version = self._cur_version(tw, elem)
await dump_uvarint(self.iobj, 0)
await dump_uvarint(self.iobj, version)
self.version_db.set_version(tw, 0, version)
return self.version_db.get_version(tw)[1] | 0.002457 |
def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
cors, target_version=None, delete_retention_policy=None, static_website=None):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-seperated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-seperated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
'''
service_properties_element = ETree.Element('StorageServiceProperties')
# Logging
if logging:
logging_element = ETree.SubElement(service_properties_element, 'Logging')
ETree.SubElement(logging_element, 'Version').text = logging.version
ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
ETree.SubElement(logging_element, 'Read').text = str(logging.read)
ETree.SubElement(logging_element, 'Write').text = str(logging.write)
retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
_convert_retention_policy_to_xml(logging.retention_policy, retention_element)
# HourMetrics
if hour_metrics:
hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
_convert_metrics_to_xml(hour_metrics, hour_metrics_element)
# MinuteMetrics
if minute_metrics:
minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
_convert_metrics_to_xml(minute_metrics, minute_metrics_element)
# CORS
# Make sure to still serialize empty list
if cors is not None:
cors_element = ETree.SubElement(service_properties_element, 'Cors')
for rule in cors:
cors_rule = ETree.SubElement(cors_element, 'CorsRule')
ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
# Target version
if target_version:
ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
# DeleteRetentionPolicy
if delete_retention_policy:
policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
if delete_retention_policy.enabled:
ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)
# StaticWebsite
if static_website:
static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)
if static_website.enabled:
if static_website.index_document is not None:
ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)
if static_website.error_document_404_path is not None:
ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
str(static_website.error_document_404_path)
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
method='xml')
except:
raise
finally:
output = stream.getvalue()
stream.close()
return output | 0.003953 |
def get_task_workers(self, task_name=None, json_obj=None):
"""
Get workers from task
:param task_name: task name
:param json_obj: json object parsed from get_task_detail2
:return: list of workers
.. seealso:: :class:`odps.models.Worker`
"""
if task_name is None and json_obj is None:
raise ValueError('Either task_name or json_obj should be provided')
if json_obj is None:
json_obj = self.get_task_detail2(task_name)
return WorkerDetail2.extract_from_json(json_obj, client=self._client, parent=self) | 0.004959 |
def FromJsonString(self, value):
"""Converts a string to Duration.
Args:
value: A string to be converted. The string must end with 's'. Any
fractional digits (or none) are accepted as long as they fit into
      precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s".
Raises:
ParseError: On parsing problems.
"""
if len(value) < 1 or value[-1] != 's':
raise ParseError(
'Duration must end with letter "s": {0}.'.format(value))
try:
pos = value.find('.')
if pos == -1:
self.seconds = int(value[:-1])
self.nanos = 0
else:
self.seconds = int(value[:pos])
if value[0] == '-':
self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9))
else:
self.nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9))
except ValueError:
raise ParseError(
'Couldn\'t parse duration: {0}.'.format(value)) | 0.01032 |
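A minimal standalone sketch of the same parsing rule, splitting a duration string into whole seconds and nanoseconds. The helper name parse_duration and the tuple return value are illustrative assumptions, not part of the protobuf Duration API, which stores the result on self.seconds/self.nanos:
def parse_duration(value):
    # Illustrative sketch only; mirrors the seconds/nanos split above.
    if not value or value[-1] != 's':
        raise ValueError('Duration must end with letter "s": {0}.'.format(value))
    pos = value.find('.')
    if pos == -1:
        return int(value[:-1]), 0
    seconds = int(value[:pos])
    sign = '-0' if value[0] == '-' else '0'
    nanos = int(round(float(sign + value[pos:-1]) * 1e9))
    return seconds, nanos

print(parse_duration("1.01s"))    # (1, 10000000)
print(parse_duration("-3.100s"))  # (-3, -100000000)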
def _transform_to_2d(t):
"""Convert vectors to column matrices, to always have a 2d shape."""
t = np.asarray(t)
dim = len(t.shape)
assert dim <= 2
if dim < 2:
t = np.atleast_2d(t).T
return t | 0.004444 |
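A quick usage sketch (assuming only numpy), showing that 1-d input comes back as a single column while 2-d input passes through unchanged:
import numpy as np

v = _transform_to_2d([1.0, 2.0, 3.0])
print(v.shape)   # (3, 1) -- vector turned into a column matrix
m = _transform_to_2d(np.ones((2, 4)))
print(m.shape)   # (2, 4) -- already 2-d, left as is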
def get_instrument_variables(ds):
'''
Returns a list of instrument variables
:param netCDF4.Dataset ds: An open netCDF4 Dataset
'''
candidates = []
for variable in ds.variables:
instrument = getattr(ds.variables[variable], 'instrument', '')
if instrument and instrument in ds.variables:
if instrument not in candidates:
candidates.append(instrument)
instrument = getattr(ds, 'instrument', '')
if instrument and instrument in ds.variables:
if instrument not in candidates:
candidates.append(instrument)
return candidates | 0.00161 |
def transform(self, X):
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
check_is_fitted(self, "encoded_columns_")
check_columns_exist(X.columns, self.feature_names_)
Xt = X.copy()
for col, cat in self.categories_.items():
Xt[col].cat.set_categories(cat, inplace=True)
new_data = self._encode(Xt, self.feature_names_)
return new_data.loc[:, self.encoded_columns_] | 0.00317 |
def get_unit_waveforms(recording, sorting, unit_ids=None, grouping_property=None, start_frame=None, end_frame=None,
ms_before=3., ms_after=3., dtype=None, max_num_waveforms=np.inf, filter=False,
bandpass=[300, 6000], save_as_features=True, verbose=False, compute_property_from_recording=False):
'''
This function returns the spike waveforms from the specified unit_ids from t_start and t_stop
in the form of a numpy array of spike waveforms.
Parameters
----------
recording
sorting
unit_ids
grouping_property
start_frame
end_frame
ms_before
ms_after
dtype
max_num_waveforms
filter
bandpass
save_as_features
verbose
compute_property_from_recording
Returns
-------
waveforms
'''
if isinstance(unit_ids, (int, np.integer)):
unit_ids = [unit_ids]
elif unit_ids is None:
unit_ids = sorting.get_unit_ids()
elif not isinstance(unit_ids, (list, np.ndarray)):
        raise Exception("unit_ids is not a valid type")
if dtype is None:
dtype = np.float32
waveform_list = []
if grouping_property is not None:
if grouping_property not in recording.get_channel_property_names():
raise ValueError("'grouping_property' should be a property of recording extractors")
if compute_property_from_recording:
compute_sorting_group = True
elif grouping_property not in sorting.get_unit_property_names():
print(grouping_property, ' not in sorting extractor. Computing it from the recording extractor')
compute_sorting_group = True
else:
compute_sorting_group = False
print("Waveforms by property: ", grouping_property)
if not compute_sorting_group:
rec_list, rec_props = se.get_sub_extractors_by_property(recording, grouping_property, return_property_list=True)
sort_list, sort_props = se.get_sub_extractors_by_property(sorting, grouping_property, return_property_list=True)
if len(rec_props) != len(sort_props):
print('Different' + grouping_property + ' numbers: using largest number of ' + grouping_property)
if len(rec_props) > len(sort_props):
for i_r, rec in enumerate(rec_props):
if rec not in sort_props:
print('Inserting None for property ', rec)
sort_list.insert(i_r, None)
else:
for i_s, sort in enumerate(sort_props):
if sort not in rec_props:
rec_list.insert(i_s, None)
else:
assert len(rec_list) == len(sort_list)
for i_list, (rec, sort) in enumerate(zip(rec_list, sort_list)):
for i, unit_id in enumerate(unit_ids):
# ts_ = time.time()
if sort is not None and rec is not None:
if unit_id in sort.get_unit_ids():
if not filter:
rec = rec
else:
rec = st.preprocessing.bandpass_filter(recording=rec, freq_min=bandpass[0],
freq_max=bandpass[1])
fs = rec.get_sampling_frequency()
n_pad = [int(ms_before * fs / 1000), int(ms_after * fs / 1000)]
if verbose:
print('Waveform ' + str(i + 1) + '/' + str(len(unit_ids)))
waveforms, indices = _get_random_spike_waveforms(recording=rec,
sorting=sort,
unit=unit_id,
max_num=max_num_waveforms,
snippet_len=n_pad)
waveforms = waveforms.swapaxes(0, 2)
waveforms = waveforms.swapaxes(1, 2)
waveforms = waveforms.astype(dtype)
if save_as_features:
if len(indices) < len(sort.get_unit_spike_train(unit_id)):
if 'waveforms' not in sorting.get_unit_spike_feature_names(unit_id):
features = np.array([None] * len(sorting.get_unit_spike_train(unit_id)))
else:
features = np.array(sorting.get_unit_spike_features(unit_id, 'waveforms'))
for i, ind in enumerate(indices):
features[ind] = waveforms[i]
else:
features = waveforms
sorting.set_unit_spike_features(unit_id, 'waveforms', features)
waveform_list.append(waveforms)
else:
for i, unit_id in enumerate(unit_ids):
# ts_ = time.time()
if unit_id in sorting.get_unit_ids():
rec_groups = np.array([recording.get_channel_property(ch, grouping_property)
for ch in recording.get_channel_ids()])
if not filter:
rec = recording
else:
rec = st.preprocessing.bandpass_filter(recording=recording, freq_min=bandpass[0],
freq_max=bandpass[1])
fs = rec.get_sampling_frequency()
n_pad = [int(ms_before * fs / 1000), int(ms_after * fs / 1000)]
if verbose:
print('Waveform ' + str(i + 1) + '/' + str(len(unit_ids)))
waveforms, indices = _get_random_spike_waveforms(recording=recording,
sorting=sorting,
unit=unit_id,
max_num=max_num_waveforms,
snippet_len=n_pad)
waveforms = waveforms.swapaxes(0, 2)
waveforms = waveforms.swapaxes(1, 2)
waveforms = waveforms.astype(dtype)
mean_waveforms = np.squeeze(np.mean(waveforms, axis=0))
max_amp_elec = np.unravel_index(mean_waveforms.argmin(), mean_waveforms.shape)[0]
group = recording.get_channel_property(recording.get_channel_ids()[max_amp_elec], grouping_property)
elec_group = np.where(rec_groups == group)
waveforms = np.squeeze(waveforms[:, elec_group, :])
if save_as_features:
if len(indices) < len(sorting.get_unit_spike_train(unit_id)):
if 'waveforms' not in sorting.get_unit_spike_feature_names(unit_id):
features = np.array([None] * len(sorting.get_unit_spike_train(unit_id)))
else:
features = np.array(sorting.get_unit_spike_features(unit_id, 'waveforms'))
for i, ind in enumerate(indices):
features[ind] = waveforms[i]
else:
features = waveforms
sorting.set_unit_spike_features(unit_id, 'waveforms', features)
waveform_list.append(waveforms)
return waveform_list
else:
for i, unit_id in enumerate(unit_ids):
# ts_ = time.time()
if unit_id not in sorting.get_unit_ids():
                raise Exception("unit_id is not valid")
            if filter:
                recording = st.preprocessing.bandpass_filter(recording=recording, freq_min=bandpass[0],
                                                              freq_max=bandpass[1])
fs = recording.get_sampling_frequency()
n_pad = [int(ms_before * fs / 1000), int(ms_after * fs / 1000)]
if verbose:
print('Waveform ' + str(i + 1) + '/' + str(len(unit_ids)))
waveforms, indices = _get_random_spike_waveforms(recording=recording,
sorting=sorting,
unit=unit_id,
max_num=max_num_waveforms,
snippet_len=n_pad)
# print('extract wf: ', time.time() - ts_)
waveforms = waveforms.swapaxes(0, 2)
waveforms = waveforms.swapaxes(1, 2)
waveforms = waveforms.astype(dtype)
# print('swap wf: ', time.time() - ts_)
if save_as_features:
if len(indices) < len(sorting.get_unit_spike_train(unit_id)):
if 'waveforms' not in sorting.get_unit_spike_feature_names(unit_id):
features = np.array([None] * len(sorting.get_unit_spike_train(unit_id)))
else:
features = np.array(sorting.get_unit_spike_features(unit_id, 'waveforms'))
for i, ind in enumerate(indices):
features[ind] = waveforms[i]
else:
features = waveforms
sorting.set_unit_spike_features(unit_id, 'waveforms', features)
# print('append feats: ', time.time() - ts_)
waveform_list.append(waveforms)
# print('append wf: ', time.time() - ts_)
if len(waveform_list) == 1:
return waveform_list[0]
else:
return waveform_list | 0.004933 |
def GetStructFormatString(self):
"""Retrieves the Python struct format string.
Returns:
str: format string as used by Python struct or None if format string
cannot be determined.
"""
if self._format_string is None and self._data_type_maps:
format_strings = []
for member_data_type_map in self._data_type_maps:
if member_data_type_map is None:
return None
member_format_string = member_data_type_map.GetStructFormatString()
if member_format_string is None:
return None
format_strings.append(member_format_string)
self._format_string = ''.join(format_strings)
return self._format_string | 0.008633 |
def make_psd_xmldoc(psddict, xmldoc=None):
"""Add a set of PSDs to a LIGOLW XML document. If the document is not
given, a new one is created first.
"""
xmldoc = ligolw.Document() if xmldoc is None else xmldoc.childNodes[0]
# the PSDs must be children of a LIGO_LW with name "psd"
root_name = u"psd"
Attributes = ligolw.sax.xmlreader.AttributesImpl
lw = xmldoc.appendChild(
ligolw.LIGO_LW(Attributes({u"Name": root_name})))
for instrument, psd in psddict.items():
xmlseries = _build_series(psd, (u"Frequency,Real", u"Frequency"),
None, 'deltaF', 's^-1')
fs = lw.appendChild(xmlseries)
fs.appendChild(ligolw_param.Param.from_pyvalue(u"instrument",
instrument))
return xmldoc | 0.001198 |
def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
            raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
            raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err)
def postinit(self, target, annotation, simple, value=None):
"""Do some setup after initialisation.
:param target: What is being assigned to.
:type target: NodeNG
:param annotation: The type annotation of what is being assigned to.
:type: NodeNG
:param simple: Whether :attr:`target` is a pure name
or a complex statement.
:type simple: int
:param value: The value being assigned to the variables.
:type: NodeNG or None
"""
self.target = target
self.annotation = annotation
self.value = value
self.simple = simple | 0.003125 |
def save(self):
    '''Saves our config object to file'''
if self.app.cfg_mode == 'json':
with open(self.app.cfg_file, 'w') as opened_file:
json.dump(self.app.cfg, opened_file)
else:
        with open(self.app.cfg_file, 'w') as opened_file:
yaml.dump(self.app.cfg, opened_file) | 0.005831 |
def exists(self, path, **kwargs):
"""Return true if the given path exists"""
try:
self.get_file_status(path, **kwargs)
return True
except HdfsFileNotFoundException:
return False | 0.008439 |
def is_linear(self):
"""
Tests whether all filters in the list are linear. CascadeFilter and
ParallelFilter instances are also linear if all filters they group are
linear.
"""
return all(isinstance(filt, LinearFilter) or
(hasattr(filt, "is_linear") and filt.is_linear())
for filt in self.callables) | 0.002833 |
def get_nested_attribute(obj, attribute):
"""
Returns the value of the given (possibly dotted) attribute for the given
object.
If any of the parents on the nested attribute's name path are `None`, the
value of the nested attribute is also assumed as `None`.
:raises AttributeError: If any attribute access along the attribute path
fails with an `AttributeError`.
"""
parent, attr = resolve_nested_attribute(obj, attribute)
if not parent is None:
attr_value = getattr(parent, attr)
else:
attr_value = None
return attr_value | 0.00339 |
def generate_hooked_command(cmd_name, cmd_cls, hooks):
"""
Returns a generated subclass of ``cmd_cls`` that runs the pre- and
post-command hooks for that command before and after the ``cmd_cls.run``
method.
"""
def run(self, orig_run=cmd_cls.run):
self.run_command_hooks('pre_hooks')
orig_run(self)
self.run_command_hooks('post_hooks')
return type(cmd_name, (cmd_cls, object),
{'run': run, 'run_command_hooks': run_command_hooks,
'pre_hooks': hooks.get('pre', []),
'post_hooks': hooks.get('post', [])}) | 0.00165 |
def direction_vector(self, other: "Point2") -> "Point2":
    """ Converts a vector to a direction that can face vertically, horizontally or diagonally, or be zero, e.g. (0, 0), (1, -1), (1, 0) """
return self.__class__((_sign(other.x - self.x), _sign(other.y - self.y))) | 0.014337 |
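A self-contained sketch of the same sign-based logic with plain tuples; the _sign helper shown here is an assumption about the library's internal helper, not its actual definition:
def _sign(x):
    # assumed helper: returns -1, 0 or 1 depending on the sign of x
    return (x > 0) - (x < 0)

def direction_vector(p, q):
    return (_sign(q[0] - p[0]), _sign(q[1] - p[1]))

print(direction_vector((2, 5), (7, 5)))  # (1, 0)  -> horizontal
print(direction_vector((2, 5), (0, 9)))  # (-1, 1) -> diagonal
print(direction_vector((2, 5), (2, 5)))  # (0, 0)  -> zero vector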
def identifierify(name):
""" Clean up name so it works for a Python identifier. """
name = name.lower()
name = re.sub('[^a-z0-9]', '_', name)
return name | 0.005917 |
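For example (assuming only the re import used above):
print(identifierify("My NApp-Name 2.0"))   # -> my_napp_name_2_0
print(identifierify("HTTP/2 Support"))     # -> http_2_support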
def _tf_predict(model_dir, input_csvlines):
"""Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
"""
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: input_csvlines})
# convert any scalar values to a list. This may happen when there is one
# example in input_csvlines and the model uses tf.squeeze on the output
# tensor.
if len(input_csvlines) == 1:
for k, v in six.iteritems(results):
if not isinstance(v, (list, np.ndarray)):
results[k] = [v]
# Convert bytes to string. In python3 the results may be bytes.
for k, v in six.iteritems(results):
if any(isinstance(x, bytes) for x in v):
results[k] = [x.decode('utf-8') for x in v]
return results | 0.010265 |
def apply_security_groups(self, security_groups):
"""
Applies security groups to the load balancer.
Applying security groups that are already registered with the
Load Balancer has no effect.
:type security_groups: string or List of strings
:param security_groups: The name of the security group(s) to add.
"""
if isinstance(security_groups, str) or \
isinstance(security_groups, unicode):
security_groups = [security_groups]
new_sgs = self.connection.apply_security_groups_to_lb(
self.name, security_groups)
self.security_groups = new_sgs | 0.005814 |
def run(self):
"""Load all artists into the database
"""
df = ArtistsInputData().load()
# rename columns
df.rename(columns={'artistLabel': 'name',
'genderLabel': 'gender'},
inplace=True)
# attribute columns that exist in the data model
attribute_columns = ['name', 'wiki_id']
# the extended model also stores the date of birth and gender
if config.EXTENDED:
attribute_columns += ['gender', 'year_of_birth']
# store entities and attributes
self.store(df, attribute_columns)
self.done() | 0.00311 |
def netconf_session_start_source_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_session_start = ET.SubElement(config, "netconf-session-start", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
source_host = ET.SubElement(netconf_session_start, "source-host")
source_host.text = kwargs.pop('source_host')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006048 |
def elapsed(self):
"""
    Return a timedelta representation of the time passed since the worker
    started running.
"""
if not self.start_date:
return None
return (self.end_date or datetime.utcnow()) - self.start_date | 0.007519 |
def query_state(self, StateType):
"""
Is a button depressed?
True if a button is pressed, false otherwise.
"""
if StateType == M_LEFT:
# Checking left mouse button
return self.left_pressed
elif StateType == M_MIDDLE:
# Checking middle mouse button
return self.middle_pressed
elif StateType == M_RIGHT:
# Checking right mouse button
return self.right_pressed | 0.004124 |
def latents_to_frames(z_top_interp, level_eps_interp, hparams):
"""Decodes latents to frames."""
# Decode [z^1_t, z^2_t .. z^l_t] to [X_t]
images, _, _, _ = glow_ops.encoder_decoder(
"codec", z_top_interp, hparams, eps=level_eps_interp, reverse=True)
images = glow_ops.postprocess(images)
return images | 0.018868 |
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> np.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> np.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> np.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return np.cross(v0, v1, axis=axis) | 0.001515 |
def called_with(self, *args, **kwargs):
"""Return True if the spy was called with the specified args/kwargs.
Otherwise raise VerificationError.
"""
expected_call = Call(*args, **kwargs)
if expected_call in calls(self.spy):
return True
raise VerificationError(
"expected %s to be called with %s, but it wasn't" % (
self.spy, expected_call.formatted_args)) | 0.004505 |
def PSD_fitting_eqn_with_background(A, OmegaTrap, Gamma, FlatBackground, omega):
"""
The value of the fitting equation:
A / ((OmegaTrap**2 - omega**2)**2 + (omega * Gamma)**2) + FlatBackground
to be fit to the PSD
Parameters
----------
A : float
Fitting constant A
A = γ**2*Γ_0*(2*K_b*T_0)/(π*m)
where:
γ = conversionFactor
Γ_0 = Damping factor due to environment
π = pi
OmegaTrap : float
The trapping frequency in the axis of interest
(in angular frequency)
Gamma : float
The damping factor Gamma = Γ = Γ_0 + δΓ
where:
Γ_0 = Damping factor due to environment
δΓ = extra damping due to feedback or other effects
FlatBackground : float
Adds a constant offset to the peak to account for a flat
noise background
omega : float
The angular frequency to calculate the value of the
fitting equation at
Returns
-------
Value : float
The value of the fitting equation
"""
return A / ((OmegaTrap**2 - omega**2)**2 + omega**2 * (Gamma)**2) + FlatBackground | 0.005978 |
def __substituteFromClientStatement(self,match,prevResponse,extraSymbol="",sessionID = "general"):
"""
Substitute from Client statement into respose
"""
prev = 0
startPadding = 1+len(extraSymbol)
finalResponse = ""
for m in re.finditer(r'%'+extraSymbol+'[0-9]+', prevResponse):
start = m.start(0)
end = m.end(0)
num = int(prevResponse[start+startPadding:end])
finalResponse += prevResponse[prev:start]
try:finalResponse += self._quote(self._substitute(match.group(num)),sessionID)
except IndexError as e:pass
prev = end
namedGroup = match.groupdict()
if namedGroup:
prevResponse = finalResponse + prevResponse[prev:]
finalResponse = ""
prev = 0
for m in re.finditer(r'%'+extraSymbol+'([a-zA-Z_][a-zA-Z_0-9]*)([^a-zA-Z_0-9]|$)', prevResponse):
start = m.start(1)
end = m.end(1)
finalResponse += prevResponse[prev:start]
try:
value = namedGroup[prevResponse[start+startPadding:end]]
if value:finalResponse += self._quote(self._substitute(value),sessionID)
except KeyError as e:pass
prev = end
return finalResponse + prevResponse[prev:] | 0.016595 |
def apply(cls, self, *args, **kwargs):
"""
Applies kwargs arguments to the instance passed as the first
argument to the call.
For defined INPUTS, OUTPUTS and PARAMETERS the method extracts
a corresponding value from kwargs and sets it as an instance attribute.
For example, if the processor has a 'foo' parameter declared and
'foo = something' is passed to apply(), self.foo will become
'something'.
"""
for key in kwargs:
if key in [ x.name for x in cls.INPUTS ]:
setattr(self, key, kwargs[key])
if key in [ x.name for x in cls.OUTPUTS ]:
setattr(self, key, kwargs[key])
if key in [ x.name for x in cls.PARAMETERS ]:
setattr(self, key, kwargs[key]) | 0.013819 |
def gotoHome(self):
"""
Navigates to the home position for the edit.
"""
mode = QTextCursor.MoveAnchor
# select the home
if QApplication.instance().keyboardModifiers() == Qt.ShiftModifier:
mode = QTextCursor.KeepAnchor
cursor = self.textCursor()
block = projex.text.nativestring(cursor.block().text())
cursor.movePosition( QTextCursor.StartOfBlock, mode )
if block.startswith('>>> '):
cursor.movePosition(QTextCursor.Right, mode, 4)
elif block.startswith('... '):
match = re.match('...\s*', block)
cursor.movePosition(QTextCursor.Right, mode, match.end())
self.setTextCursor(cursor) | 0.012771 |
def get_child_by_qualifier(self, parent_qualifier):
"""
:param parent_qualifier: time_qualifier of the parent process
:return: <HierarchyEntry> child entry to the HierarchyEntry associated with the parent_qualifier
or None if the given parent_qualifier is not registered in this hierarchy
or None if the given parent_qualifier is the bottom process
"""
if parent_qualifier not in self.qualifiers:
return None
process_qualifiers = list(self.qualifiers)
if parent_qualifier == process_qualifiers[-1]:
return None
parent_index = process_qualifiers.index(parent_qualifier)
return self.qualifiers[process_qualifiers[parent_index + 1]] | 0.00527 |
def norm(self, x):
"""Calculate the norm of an element.
This is the standard implementation using `inner`.
Subclasses should override it for optimization purposes.
Parameters
----------
    x : `LinearSpaceElement`
Element whose norm is calculated.
Returns
-------
norm : float
The norm of the element.
"""
return float(np.sqrt(self.inner(x, x).real)) | 0.00432 |
def _note_reply_pending(self, option, state):
"""Record the status of requested Telnet options."""
    if option not in self.telnet_opt_dict:
self.telnet_opt_dict[option] = TelnetOption()
self.telnet_opt_dict[option].reply_pending = state | 0.01087 |
def lemmatize(self):
"""Lemmatize all Units in self.unit_list.
Modifies:
- self.unit_list: converts the .text property into its lemmatized form.
This method lemmatizes all inflected variants of permissible words to
those words' respective canonical forms. This is done to ensure that
each instance of a permissible word will correspond to a term vector with
which semantic relatedness to other words' term vectors can be computed.
(Term vectors were derived from a corpus in which inflected words were
    similarly lemmatized, meaning that, e.g., 'dogs' will not have a term
vector to use for semantic relatedness computation.)
"""
for unit in self.unit_list:
if lemmatizer.lemmatize(unit.text) in self.lemmas:
unit.text = lemmatizer.lemmatize(unit.text) | 0.006772 |
def populate_freqs(self):
"""
Populate frequency axis
"""
if self.header[b'foff'] < 0:
f0 = self.f_end
else:
f0 = self.f_begin
self._setup_chans()
#create freq array
i_vals = np.arange(self.chan_start_idx, self.chan_stop_idx)
freqs = self.header[b'foff'] * i_vals + f0
return freqs | 0.007692 |
def index(self):
'''
    Index function.
'''
self.render('ext_excel/index.html',
userinfo=self.userinfo,
cfg=CMS_CFG,
kwd={}, ) | 0.009259 |
def clean_tempdir(context, scenario):
"""
Clean up temporary test dirs for passed tests.
Leave failed test dirs for manual inspection.
"""
tempdir = getattr(context, 'tempdir', None)
if tempdir and scenario.status == 'passed':
shutil.rmtree(tempdir)
del(context.tempdir) | 0.003205 |
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
"""
if self._date_time_ranges:
for date_time_range in self._date_time_ranges:
if date_time_range.start_date_time is None:
end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
output_writer.Write('\t{0:s} after {1:s}\n'.format(
date_time_range.time_value, end_time_string))
elif date_time_range.end_date_time is None:
start_time_string = (
date_time_range.start_date_time.CopyToDateTimeString())
output_writer.Write('\t{0:s} before {1:s}\n'.format(
date_time_range.time_value, start_time_string))
else:
start_time_string = (
date_time_range.start_date_time.CopyToDateTimeString())
end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
output_writer.Write('\t{0:s} between {1:s} and {2:s}\n'.format(
date_time_range.time_value, start_time_string,
end_time_string)) | 0.009632 |
def set_min_leverage(self, min_leverage, grace_period):
"""Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage.
"""
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control) | 0.003759 |
def Defaults(self,key):
"""Returns default configurations for resources deployed to this group.
If specified key is not defined returns None.
# {"cpu":{"inherited":false},"memoryGB":{"inherited":false},"networkId":{"inherited":false},
# "primaryDns":{"value":"172.17.1.26","inherited":true},"secondaryDns":{"value":"172.17.1.27","inherited":true},
# "templateName":{"value":"WIN2012DTC-64","inherited":false}}
"""
if not hasattr(self,'defaults'):
self.defaults = clc.v2.API.Call('GET','groups/%s/%s/defaults' % (self.alias,self.id), session=self.session)
try:
return(self.defaults[key]['value'])
except:
return(None) | 0.03096 |
def pix2sky_ellipse(self, pixel, sx, sy, theta):
"""
Convert an ellipse from pixel to sky coordinates.
Parameters
----------
pixel : (float, float)
The (x, y) coordinates of the center of the ellipse.
sx, sy : float
The major and minor axes (FHWM) of the ellipse, in pixels.
theta : float
The rotation angle of the ellipse (degrees).
theta = 0 corresponds to the ellipse being aligned with the x-axis.
Returns
-------
ra, dec : float
The (ra, dec) coordinates of the center of the ellipse (degrees).
a, b : float
The semi-major and semi-minor axis of the ellipse (degrees).
pa : float
The position angle of the ellipse (degrees).
"""
ra, dec = self.pix2sky(pixel)
x, y = pixel
v_sx = [x + sx * np.cos(np.radians(theta)),
y + sx * np.sin(np.radians(theta))]
ra2, dec2 = self.pix2sky(v_sx)
major = gcd(ra, dec, ra2, dec2)
pa = bear(ra, dec, ra2, dec2)
v_sy = [x + sy * np.cos(np.radians(theta - 90)),
y + sy * np.sin(np.radians(theta - 90))]
ra2, dec2 = self.pix2sky(v_sy)
minor = gcd(ra, dec, ra2, dec2)
pa2 = bear(ra, dec, ra2, dec2) - 90
# The a/b vectors are perpendicular in sky space, but not always in pixel space
# so we have to account for this by calculating the angle between the two vectors
# and modifying the minor axis length
defect = pa - pa2
minor *= abs(np.cos(np.radians(defect)))
return ra, dec, major, minor, pa | 0.002375 |
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
                timeout=self.opts['timeout'],
data=high_data,
)
formula_tar.close() | 0.002335 |
def write_encoded(file_obj,
stuff,
encoding='utf-8'):
"""
If a file is open in binary mode and a string is passed, encode and write
If a file is open in text mode and bytes are passed, decode and write
Parameters
-----------
file_obj: file object, with 'write' and 'mode'
stuff: str or bytes, stuff to be written
encoding: str, encoding of text
"""
binary_file = 'b' in file_obj.mode
string_stuff = isinstance(stuff, basestring)
binary_stuff = isinstance(stuff, bytes)
if not PY3:
file_obj.write(stuff)
elif binary_file and string_stuff:
file_obj.write(stuff.encode(encoding))
elif not binary_file and binary_stuff:
file_obj.write(stuff.decode(encoding))
else:
file_obj.write(stuff)
file_obj.flush()
return stuff | 0.001149 |
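# A minimal usage sketch, assuming write_encoded and its module globals (PY3,
# basestring) are in scope; the helper reconciles str/bytes with the file mode.
with open('example.txt', 'wb') as f:      # binary-mode file
    write_encoded(f, u'hello world')      # text is encoded to utf-8 before writing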
def partition(pred, iterable, tolist=False):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
ifalse = six.moves.filterfalse(pred, t1)
itrue = six.moves.filter(pred, t2)
if tolist:
return list(ifalse), list(itrue)
else:
return ifalse, itrue | 0.002469 |
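# A minimal usage sketch, assuming the partition helper above is in scope;
# false entries come first, true entries second (here: evens, then odds).
def is_odd(x):
    return x % 2 == 1
evens, odds = partition(is_odd, range(10), tolist=True)
# evens == [0, 2, 4, 6, 8]; odds == [1, 3, 5, 7, 9]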
def stats(data):
'''Dictionary with summary stats for data
Returns:
        dictionary with length, mean, sum, standard deviation,\
min and max of data
'''
return {'len': len(data),
'mean': np.mean(data),
'sum': np.sum(data),
'std': np.std(data),
'min': np.min(data),
'max': np.max(data)} | 0.002653 |
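# A minimal usage sketch, assuming numpy is imported as np alongside stats.
summary = stats([1.0, 2.0, 3.0, 4.0])
# -> {'len': 4, 'mean': 2.5, 'sum': 10.0, 'std': 1.118..., 'min': 1.0, 'max': 4.0}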
def setTypingStatus(self, status, thread_id=None, thread_type=None):
"""
        Sets the user's typing status in a thread
:param status: Specify the typing status
:param thread_id: User/Group ID to change status in. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type status: models.TypingStatus
:type thread_type: models.ThreadType
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, thread_type)
data = {
"typ": status.value,
"thread": thread_id,
"to": thread_id if thread_type == ThreadType.USER else "",
"source": "mercury-chat",
}
j = self._post(self.req_url.TYPING, data, fix_request=True, as_json=True) | 0.004866 |
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception('Updating a commission for a different asset?')
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
# We treat cost basis as the share price where we have broken even.
        # For longs, commissions cause a relatively straightforward increase
# in the cost basis.
#
# For shorts, you actually want to decrease the cost basis because you
# break even and earn a profit when the share price decreases.
#
# Shorts are represented as having a negative `amount`.
#
# The multiplication and division by `amount` cancel out leaving the
# cost_basis positive, while subtracting the commission.
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.price_multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount | 0.00123 |
def nl_cb_call(cb, type_, msg):
"""Call a callback function.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink-private/netlink.h#L136
Positional arguments:
cb -- nl_cb class instance.
type_ -- callback type integer (e.g. NL_CB_MSG_OUT).
msg -- Netlink message (nl_msg class instance).
Returns:
Integer from the callback function (like NL_OK, NL_SKIP, etc).
"""
cb.cb_active = type_
ret = cb.cb_set[type_](msg, cb.cb_args[type_])
cb.cb_active = 10 + 1 # NL_CB_TYPE_MAX + 1
return int(ret) | 0.001789 |
def set(self, item, column=None, value=None):
"""
Query or set the value of given item.
With one argument, return a dictionary of column/value pairs for the
specified item. With two arguments, return the current value of the
specified column. With three arguments, set the value of given column
in given item to the specified value.
:param item: item's identifier
:type item: str
:param column: column's identifier
:type column: str, int or None
:param value: new value
"""
if value is not None:
self._visual_drag.set(item, ttk.Treeview.column(self, column, 'id'), value)
return ttk.Treeview.set(self, item, column, value) | 0.004011 |
def run_script(self,
script,
in_shell=True,
echo=None,
note=None,
loglevel=logging.DEBUG):
"""Run the passed-in string as a script on the target's command line.
@param script: String representing the script. It will be de-indented
and stripped before being run.
@param in_shell: Indicate whether we are in a shell or not. (Default: True)
@param note: See send()
@type script: string
@type in_shell: boolean
"""
shutit = self.shutit
shutit.handle_note(note, 'Script: ' + str(script))
shutit.log('Running script beginning: "' + ''.join(script.split())[:30] + ' [...]', level=logging.INFO)
# Trim any whitespace lines from start and end of script, then dedent
lines = script.split('\n')
while lines and re.match('^[ \t]*$', lines[0]):
lines = lines[1:]
while lines and re.match('^[ \t]*$', lines[-1]):
lines = lines[:-1]
if not lines:
return True
script = '\n'.join(lines)
script = textwrap.dedent(script)
# Send the script and run it in the manner specified
if shutit.build['delivery'] in ('docker','dockerfile') and in_shell:
script = ('set -o xtrace \n\n' + script + '\n\nset +o xtrace')
self.quick_send('command mkdir -p ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts && chmod 777 ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts', echo=False)
self.send_file(shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh',
script,
echo=False,
loglevel=loglevel)
self.quick_send('command chmod +x ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh', echo=False)
shutit.build['shutit_command_history'].append(' ' + script.replace('\n', '\n '))
if in_shell:
ret = self.send(ShutItSendSpec(self,
send=' . ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh && rm -f ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh && rm -f ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh',
echo=False,
loglevel=loglevel))
else:
ret = self.send(ShutItSendSpec(self,
send=' ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh && rm -f ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh',
echo=False,
loglevel=loglevel))
shutit.handle_note_after(note=note)
return ret | 0.028436 |
def start_logging(out=_stdout, level='info'):
"""
Begin logging.
:param out: if provided, a file-like object to log to. By default, this is
stdout.
:param level: the maximum log-level to emit (a string)
"""
global _log_level, _loggers, _started_logging
if level not in log_levels:
raise RuntimeError(
"Invalid log level '{0}'; valid are: {1}".format(
level, ', '.join(log_levels)
)
)
if _started_logging:
return
_started_logging = True
_log_level = level
handler = _TxaioFileHandler(out)
logging.getLogger().addHandler(handler)
# note: Don't need to call basicConfig() or similar, because we've
# now added at least one handler to the root logger
logging.raiseExceptions = True # FIXME
level_to_stdlib = {
'critical': logging.CRITICAL,
'error': logging.ERROR,
'warn': logging.WARNING,
'info': logging.INFO,
'debug': logging.DEBUG,
'trace': logging.DEBUG,
}
logging.getLogger().setLevel(level_to_stdlib[level])
# make sure any loggers we created before now have their log-level
# set (any created after now will get it from _log_level
for logger in _loggers:
logger._set_log_level(level) | 0.000763 |
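# A minimal usage sketch, assuming this is txaio's stdlib-backed start_logging;
# it routes txaio log messages at 'debug' and above to stderr.
import sys
start_logging(out=sys.stderr, level='debug')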
def map_version(self, requirement, local_version):
"""
Maps a local version name to one recognised by the Requirement class
Parameters
----------
requirement : str
Name of the requirement
        local_version : str
            The locally-detected version string to map
"""
if isinstance(self._versions_map, dict):
version = self._versions_map.get(requirement, {}).get(
local_version, local_version)
else:
version = self._versions_map(requirement, local_version)
return version | 0.003515 |
def build_plugin(cls, class_name, config):
"""Create an instance of the named plugin and return it
:param class_name: fully qualified name of class
:type class_name: str
:param config: the supporting configuration for plugin
:type config: PluginConfig
:rtype: AbstractPlugin
:return: an instance of a concrete implementation of AbstractPlugin
"""
mod_path, class_name = class_name.rsplit('.', 1)
plugin_cls = getattr(importlib.import_module(mod_path), class_name)
return plugin_cls(config) | 0.006908 |
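# A hypothetical usage sketch: the enclosing factory class name, the dotted path
# 'myapp.plugins.EchoPlugin', and plugin_config are illustrative only.
plugin = PluginFactory.build_plugin('myapp.plugins.EchoPlugin', plugin_config)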
def __initialize_ui(self):
"""
Initializes the Widget ui.
"""
self.setAutoScroll(True)
self.setIndentation(self.__tree_view_indentation)
self.setRootIsDecorated(False)
self.setDragDropMode(QAbstractItemView.DragOnly)
self.header().hide()
self.setSortingEnabled(True)
self.sortByColumn(0, Qt.AscendingOrder)
self.__set_default_ui_state()
# Signals / Slots.
self.model().modelReset.connect(self.__set_default_ui_state) | 0.00381 |
def bindargs(fun, *argsbind, **kwbind):
"""
_ = bind.placeholder # unbound placeholder (arg)
f = bind(fun, _, _, arg3, kw=kw1, kw2=kw2), f(arg1, arg2)
:param fun:
:param argsbind:
:param kwbind:
:return:
"""
assert argsbind
argsb = list(argsbind)
iargs = [i for i in range(len(argsbind)) if argsbind[i] is bind.placeholder]
# iargs = [a is bind.placeholder for a in argsbind]
@functools.wraps(fun)
def wrapped(*args, **kwargs):
kws = kwbind.copy()
args_this = [a for a in argsb]
for i, arg in zip(iargs, args):
args_this[i] = arg
args_this.extend(args[len(iargs):])
# kwargs.update(kwbind)
kws.update(kwargs)
# return fun(*argsb, **kws)
return fun(*args_this, **kws)
return wrapped | 0.002427 |
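# A minimal usage sketch, assuming the companion bind object exposing
# bind.placeholder (used inside bindargs) is importable from the same module.
_ = bind.placeholder
def add3(a, b, c):
    return a + b + c
add_b10 = bindargs(add3, _, 10)
assert add_b10(1, 100) == 111  # placeholder -> 1, bound 10, trailing 100 appended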
def merge_data_and_coords(data, coords, compat='broadcast_equals',
join='outer'):
"""Used in Dataset.__init__."""
objs = [data, coords]
explicit_coords = coords.keys()
indexes = dict(extract_indexes(coords))
return merge_core(objs, compat, join, explicit_coords=explicit_coords,
indexes=indexes) | 0.002755 |
def as_matrix(self, depth=0):
"""
Create a matrix with self as node, cache it, return it.
Args:
depth (int): depth of the matrix.
Returns:
Matrix: an instance of Matrix.
"""
if depth in self._matrix_cache:
return self._matrix_cache[depth]
self._matrix_cache[depth] = matrix = Matrix(self, depth=depth)
return matrix | 0.004796 |
def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert dropout.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting dropout ...')
if names == 'short':
tf_name = 'DO' + random_string(6)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
dropout = keras.layers.Dropout(rate=params['ratio'], name=tf_name)
layers[scope_name] = dropout(layers[inputs[0]]) | 0.002628 |
def get_localhost():
'''
Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on windows vista/windows 7, there can be issues where the resolving doesn't work
properly and takes a lot of time (had this issue on the pyunit server).
Using the IP directly solves the problem.
'''
# TODO: Needs better investigation!
global _cache
if _cache is None:
try:
for addr_info in socket.getaddrinfo("localhost", 80, 0, 0, socket.SOL_TCP):
config = addr_info[4]
if config[0] == '127.0.0.1':
_cache = '127.0.0.1'
return _cache
except:
# Ok, some versions of Python don't have getaddrinfo or SOL_TCP... Just consider it 127.0.0.1 in this case.
_cache = '127.0.0.1'
else:
_cache = 'localhost'
return _cache | 0.005537 |
def read(self, **keys):
"""
Read the image.
If the HDU is an IMAGE_HDU, read the corresponding image. Compression
and scaling are dealt with properly.
"""
if not self.has_data():
return None
dtype, shape = self._get_dtype_and_shape()
array = numpy.zeros(shape, dtype=dtype)
self._FITS.read_image(self._ext+1, array)
return array | 0.004728 |
def setdocument(self, doc):
"""Associate a document with this element.
Arguments:
doc (:class:`Document`): A document
Each element must be associated with a FoLiA document.
"""
assert isinstance(doc, Document)
if not self.doc:
self.doc = doc
if self.id:
if self.id in doc:
raise DuplicateIDError(self.id)
else:
                    self.doc.index[self.id] = self
for e in self: #recursive for all children
if isinstance(e,AbstractElement): e.setdocument(doc) | 0.009804 |
def GetFlagSuggestions(attempt, longopt_list):
"""Get helpful similar matches for an invalid flag."""
# Don't suggest on very short strings, or if no longopts are specified.
if len(attempt) <= 2 or not longopt_list:
return []
option_names = [v.split('=')[0] for v in longopt_list]
# Find close approximations in flag prefixes.
# This also handles the case where the flag is spelled right but ambiguous.
distances = [(_DamerauLevenshtein(attempt, option[0:len(attempt)]), option)
for option in option_names]
distances.sort(key=lambda t: t[0])
least_errors, _ = distances[0]
# Don't suggest excessively bad matches.
if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
return []
suggestions = []
for errors, name in distances:
if errors == least_errors:
suggestions.append(name)
else:
break
return suggestions | 0.018952 |
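# A minimal usage sketch, assuming the module-private helpers
# (_DamerauLevenshtein, _SUGGESTION_ERROR_RATE_THRESHOLD) are in scope.
print(GetFlagSuggestions('verbos', ['verbose', 'version', 'help=']))
# -> ['verbose']  (only the closest prefix matches are suggested)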
def bitsToString(arr):
"""Returns a string representing a numpy array of 0's and 1's"""
s = array('c','.'*len(arr))
for i in xrange(len(arr)):
if arr[i] == 1:
s[i]='*'
return s | 0.041237 |
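# A minimal Python 2 usage sketch (the 'c' array typecode and xrange are
# Python 2 only), assuming numpy and array are imported in the defining module.
import numpy
marks = bitsToString(numpy.array([0, 1, 1, 0]))
# marks.tostring() == '.**.'  (set bits render as '*', clear bits as '.')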
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'softlayer_hw',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
name = vm_['name']
hostname = name
domain = config.get_cloud_config_value(
'domain', vm_, __opts__, default=None
)
if domain is None:
        raise SaltCloudSystemExit(
'A domain name is required for the SoftLayer driver.'
)
if vm_.get('use_fqdn'):
name = '.'.join([name, domain])
vm_['name'] = name
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', name)
conn = get_conn(service='SoftLayer_Product_Order')
kwargs = {
'complexType': 'SoftLayer_Container_Product_Order_Hardware_Server',
'quantity': 1,
'hardware': [{
'hostname': hostname,
'domain': domain,
}],
# Baremetal Package
'packageId': 50,
'prices': [
# Size Ex: 1921: 2 x 2.0 GHz Core Bare Metal Instance - 2 GB Ram
{'id': vm_['size']},
# HDD Ex: 19: 250GB SATA II
{'id': vm_['hdd']},
# Image Ex: 13963: CentOS 6.0 - Minimal Install (64 bit)
{'id': vm_['image']},
# The following items are currently required
# Reboot / Remote Console
{'id': '905'},
# 1 IP Address
{'id': '21'},
# Host Ping Monitoring
{'id': '55'},
# Email and Ticket Notifications
{'id': '57'},
# Automated Notification Response
{'id': '58'},
# Unlimited SSL VPN Users & 1 PPTP VPN User per account
{'id': '420'},
# Nessus Vulnerability Assessment & Reporting
{'id': '418'},
],
}
optional_products = config.get_cloud_config_value(
'optional_products', vm_, __opts__, default=[]
)
for product in optional_products:
kwargs['prices'].append({'id': product})
# Default is 273 (100 Mbps Public & Private Networks)
port_speed = config.get_cloud_config_value(
'port_speed', vm_, __opts__, default=273
)
kwargs['prices'].append({'id': port_speed})
# Default is 1800 (0 GB Bandwidth)
bandwidth = config.get_cloud_config_value(
'bandwidth', vm_, __opts__, default=1800
)
kwargs['prices'].append({'id': bandwidth})
post_uri = config.get_cloud_config_value(
'post_uri', vm_, __opts__, default=None
)
if post_uri:
kwargs['prices'].append({'id': post_uri})
vlan_id = config.get_cloud_config_value(
'vlan', vm_, __opts__, default=False
)
if vlan_id:
kwargs['primaryNetworkComponent'] = {
'networkVlan': {
'id': vlan_id,
}
}
location = get_location(vm_)
if location:
kwargs['location'] = location
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
response = conn.placeOrder(kwargs)
# Leaving the following line in, commented, for easy debugging
#response = conn.verifyOrder(kwargs)
except Exception as exc:
log.error(
'Error creating %s on SoftLayer\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n%s', name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def wait_for_ip():
'''
Wait for the IP address to become available
'''
nodes = list_nodes_full()
if 'primaryIpAddress' in nodes[hostname]:
return nodes[hostname]['primaryIpAddress']
time.sleep(1)
return False
ip_address = salt.utils.cloud.wait_for_fun(
wait_for_ip,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
ssh_connect_timeout = config.get_cloud_config_value(
# 15 minutes
'ssh_connect_timeout', vm_, __opts__, 900
)
if not salt.utils.cloud.wait_for_port(ip_address,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
pass_conn = get_conn(service='SoftLayer_Account')
mask = {
'virtualGuests': {
'powerState': '',
'operatingSystem': {
'passwords': ''
},
},
}
def get_passwd():
'''
Wait for the password to become available
'''
node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
for node in node_info:
if node['id'] == response['id'] \
and 'passwords' in node['operatingSystem'] \
and node['operatingSystem']['passwords']:
return node['operatingSystem']['passwords'][0]['password']
time.sleep(5)
return False
passwd = salt.utils.cloud.wait_for_fun(
get_passwd,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
response['password'] = passwd
response['public_ip'] = ip_address
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
vm_['ssh_host'] = ip_address
vm_['password'] = passwd
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(response)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret | 0.001021 |
def fix_lockfile(self):
"""Run each line of outfile through fix_pin"""
with open(self.outfile, 'rt') as fp:
lines = [
self.fix_pin(line)
for line in self.concatenated(fp)
]
with open(self.outfile, 'wt') as fp:
fp.writelines([
line + '\n'
for line in lines
if line is not None
]) | 0.004651 |
def _make_routing_list(api_provider):
"""
Returns a list of routes to configure the Local API Service based on the APIs configured in the template.
Parameters
----------
api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider
Returns
-------
list(samcli.local.apigw.service.Route)
List of Routes to pass to the service
"""
routes = []
for api in api_provider.get_all():
route = Route(methods=[api.method], function_name=api.function_name, path=api.path,
binary_types=api.binary_media_types)
routes.append(route)
return routes | 0.007102 |
def coarse_grain_transition_matrix(P, M):
""" Coarse grain transition matrix P using memberships M
Computes
    .. math::
        P_c = (M^T M)^{-1} M^T P M
Parameters
----------
P : ndarray(n, n)
microstate transition matrix
M : ndarray(n, m)
membership matrix. Membership to macrostate m for each microstate.
Returns
-------
Pc : ndarray(m, m)
coarse-grained transition matrix.
"""
# coarse-grain matrix: Pc = (M' M)^-1 M' P M
W = np.linalg.inv(np.dot(M.T, M))
A = np.dot(np.dot(M.T, P), M)
P_coarse = np.dot(W, A)
# this coarse-graining can lead to negative elements. Setting them to zero here.
P_coarse = np.maximum(P_coarse, 0)
# and renormalize
P_coarse /= P_coarse.sum(axis=1)[:, None]
return P_coarse | 0.002472 |
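# A minimal usage sketch: three microstates lumped into two macrostates with a
# crisp membership matrix; numpy is assumed to be imported as np.
P = np.array([[0.8, 0.2, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.2, 0.8]])
M = np.array([[1.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0]])
P_c = coarse_grain_transition_matrix(P, M)  # shape (2, 2); each row sums to 1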
def amplification_type(self, channels=None):
"""
Get the amplification type used for the specified channel(s).
Each channel uses one of two amplification types: linear or
logarithmic. This function returns, for each channel, a tuple of
two numbers, in which the first number indicates the number of
decades covered by the logarithmic amplifier, and the second
indicates the linear value corresponding to the channel value zero.
        If the first value is zero, the amplifier used is linear.
The amplification type for channel "n" is extracted from the
required $PnE parameter.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the amplification type. If None,
return a list with the amplification type of all channels, in
the order of ``FCSData.channels``.
Return
------
tuple, or list of tuples
The amplification type of the specified channel(s). This is
reported as a tuple, in which the first element indicates how
many decades the logarithmic amplifier covers, and the second
indicates the linear value that corresponds to a channel value
of zero. If the first element is zero, the amplification type
is linear.
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Get detector type of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._amplification_type[ch] for ch in channels]
else:
return self._amplification_type[channels] | 0.001057 |
def _checkTimeValue( timevalue, maxvalue ):
"""Check that the given timevalue is valid.
Args:
* timevalue (numerical): The time value to be checked. Must be positive.
* maxvalue (numerical): Upper limit for time value. Must be positive.
Raises:
TypeError, ValueError
"""
if maxvalue is None:
raise TypeError('The maxvalue (for the time value) must not be None!')
minimalmodbus._checkNumerical(timevalue, minvalue=0, maxvalue=maxvalue, description='time value') | 0.016729 |
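# A minimal usage sketch, assuming minimalmodbus is importable; a valid value
# passes silently, while a negative or out-of-range value raises ValueError.
_checkTimeValue(2.5, maxvalue=10)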
def classical(vulnerability_function, hazard_imls, hazard_poes, loss_ratios):
"""
:param vulnerability_function:
an instance of
:py:class:`openquake.risklib.scientific.VulnerabilityFunction`
representing the vulnerability function used to compute the curve.
:param hazard_imls:
the hazard intensity measure type and levels
    :param hazard_poes:
the hazard curve
:param loss_ratios:
a tuple of C loss ratios
:returns:
an array of shape (2, C)
"""
assert len(hazard_imls) == len(hazard_poes), (
len(hazard_imls), len(hazard_poes))
vf = vulnerability_function
imls = vf.mean_imls()
lrem = vf.loss_ratio_exceedance_matrix(loss_ratios)
# saturate imls to hazard imls
min_val, max_val = hazard_imls[0], hazard_imls[-1]
numpy.putmask(imls, imls < min_val, min_val)
numpy.putmask(imls, imls > max_val, max_val)
# interpolate the hazard curve
poes = interpolate.interp1d(hazard_imls, hazard_poes)(imls)
    # compute the probabilities of occurrence (pos)
pos = pairwise_diff(poes)
lrem_po = numpy.empty(lrem.shape)
for idx, po in enumerate(pos):
lrem_po[:, idx] = lrem[:, idx] * po # column * po
return numpy.array([loss_ratios, lrem_po.sum(axis=1)]) | 0.000788 |
def index():
"""
This is not served anywhere in the web application.
It is used explicitly in the context of generating static files since
flask-frozen requires url_for's to crawl content.
url_for's are not used with host.show_host directly and are instead
dynamically generated through javascript for performance purposes.
"""
if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None:
override = current_app.config['ARA_PLAYBOOK_OVERRIDE']
hosts = (models.Host.query
.filter(models.Host.playbook_id.in_(override)))
else:
hosts = models.Host.query.all()
return render_template('host_index.html', hosts=hosts) | 0.001445 |
def _getPFilename(self,native,prompt):
"""Get p_filename field for this parameter
Same as get for non-list params
"""
return self.get(native=native,prompt=prompt) | 0.025641 |
def size(self):
"""Get the current terminal size."""
for fd in range(3):
cr = self._ioctl_GWINSZ(fd)
if cr:
break
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = self._ioctl_GWINSZ(fd)
os.close(fd)
except Exception:
pass
if not cr:
env = os.environ
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
return int(cr[1]), int(cr[0]) | 0.003724 |
def throws(self, exception=Exception):
"""
        Customizes the stub function to raise an exception. If conditions such as withArgs or onCall
        were specified, the exception is only raised when those conditions are met.
        Args: exception (defaults to Exception; any exception class may be passed)
Return: a SinonStub object (able to be chained)
"""
def exception_function(*args, **kwargs):
raise exception
self._copy._append_condition(self, exception_function)
return self | 0.008945 |
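# A minimal usage sketch, assuming this is the sinon.py SinonStub API; after
# configuring throws, invoking the stub raises the given exception.
stub = SinonStub()
stub.throws(ValueError)
# calling stub() now raises ValueError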