def hermite_interpolate(
x, y, precision=250, type='cardinal', c=None, b=None, t=None
):
"""
Interpolate x, y using the hermite method.
See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
This interpolation is configurable and contains 4 subtypes:
* Catmull Rom
* Finite Difference
* Cardinal
* Kochanek Bartels
The cardinal subtype is customizable with a parameter:
* c: tension (0, 1)
The Kochanek-Bartels subtype is also customizable using 3 parameters:
* c: continuity (-1, 1)
* b: bias (-1, 1)
* t: tension (-1, 1)
"""
n = len(x) - 1
m = [1] * (n + 1)
w = [1] * (n + 1)
delta_x = [x2 - x1 for x1, x2 in zip(x, x[1:])]
if type == 'catmull_rom':
type = 'cardinal'
c = 0
if type == 'finite_difference':
for i in range(1, n):
m[i] = w[i] = .5 * ((y[i + 1] - y[i]) / (x[i + 1] - x[i]) +
(y[i] - y[i - 1]) / (x[i] - x[i - 1])
) if x[i + 1] - x[i] and x[i] - x[i - 1] else 0
elif type == 'kochanek_bartels':
c = c or 0
b = b or 0
t = t or 0
for i in range(1, n):
m[i] = .5 * ((1 - t) * (1 + b) * (1 + c) * (y[i] - y[i - 1]) +
(1 - t) * (1 - b) * (1 - c) * (y[i + 1] - y[i]))
w[i] = .5 * ((1 - t) * (1 + b) * (1 - c) * (y[i] - y[i - 1]) +
(1 - t) * (1 - b) * (1 + c) * (y[i + 1] - y[i]))
if type == 'cardinal':
c = c or 0
for i in range(1, n):
m[i] = w[i] = (1 - c) * (y[i + 1] - y[i - 1]) / (
x[i + 1] - x[i - 1]
) if x[i + 1] - x[i - 1] else 0
def p(i, x_):
t = (x_ - x[i]) / delta_x[i]
t2 = t * t
t3 = t2 * t
h00 = 2 * t3 - 3 * t2 + 1
h10 = t3 - 2 * t2 + t
h01 = -2 * t3 + 3 * t2
h11 = t3 - t2
return (
h00 * y[i] + h10 * m[i] * delta_x[i] + h01 * y[i + 1] +
h11 * w[i + 1] * delta_x[i]
)
for i in range(n + 1):
yield x[i], y[i]
if i == n or delta_x[i] == 0:
continue
for s in range(1, precision):
X = x[i] + s * delta_x[i] / precision
yield X, p(i, X) | 0.000435 |
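A minimal usage sketch for the generator above (the sample points, the low precision and the tension value are made up for illustration; the default cardinal subtype is selected explicitly):

# Hypothetical call: interpolate three knots with the cardinal subtype.
# precision=4 keeps the output short; each yielded item is an (x, y) pair,
# covering both the original knots and the interpolated samples.
points = list(hermite_interpolate([0, 1, 2], [0, 1, 0], precision=4, type='cardinal', c=0.5))
print(points)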
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True) | 0.001053 |
def set_led_brightness(self, brightness):
"""Set the LED brightness for the current group/button."""
set_cmd = self._create_set_property_msg("_led_brightness", 0x07,
brightness)
self._send_method(set_cmd, self._property_set) | 0.006757 |
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, placement=[i])
new_blocks.append(block)
return new_blocks | 0.002907 |
def to_dict(self):
"""Return a dictionary representation of the Predicate."""
return {
'predicate': self.predicate,
'parents': list(self.supertypes),
'synopses': [[role.to_dict() for role in synopsis]
for synopsis in self.synopses]
} | 0.006289 |
def populateFromFile(self, dataUrl):
"""
Populates the instance variables of this ReferenceSet from the
data URL.
"""
self._dataUrl = dataUrl
fastaFile = self.getFastaFile()
for referenceName in fastaFile.references:
reference = HtslibReference(self, referenceName)
# TODO break this up into chunks and calculate the MD5
# in bits (say, 64K chunks?)
bases = fastaFile.fetch(referenceName)
md5checksum = hashlib.md5(bases).hexdigest()
reference.setMd5checksum(md5checksum)
reference.setLength(len(bases))
self.addReference(reference) | 0.002928 |
def send_request(req=None, method=None, requires_response=True):
"""Call function req and then send its results via ZMQ."""
if req is None:
return functools.partial(send_request, method=method,
requires_response=requires_response)
@functools.wraps(req)
def wrapper(self, *args, **kwargs):
params = req(self, *args, **kwargs)
_id = self.send(method, params, requires_response)
return _id
wrapper._sends = method
return wrapper | 0.001942 |
def create_parser(self, prog_name, subcommand):
"""
Override the base create_parser() method to add this command's custom
options in Django 1.7 and below.
"""
if not self.use_argparse:
self.__class__.option_list = TestCommand.option_list + self.custom_options
parser = super(Command, self).create_parser(prog_name, subcommand)
return parser | 0.007353 |
def _get_instance_attributes(self):
"""Return a generator for instance attributes' name and value.
.. code-block:: python3
for _name, _value in self._get_instance_attributes():
print("attribute name: {}".format(_name))
print("attribute value: {}".format(_value))
Returns:
generator: tuples with attribute name and value.
"""
for name, value in self.__dict__.items():
if name in map((lambda x: x[0]), self.get_class_attributes()):
yield (name, value) | 0.003466 |
def process_normal_line( self, line ):
"""process a normal line and check whether it is the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line ) | 0.027548 |
def get_device(self, device_id):
"""
Return specified device.
Returns a Command.
"""
def process_result(result):
return Device(result)
return Command('get', [ROOT_DEVICES, device_id],
process_result=process_result) | 0.006689 |
def put(self, measurementId, deviceId):
"""
Fails the measurement for this device.
:param measurementId: the measurement name.
:param deviceId: the device name.
:return: 200 if the measurement was failed successfully, otherwise 404.
"""
payload = request.get_json()
failureReason = json.loads(payload).get('failureReason') if payload is not None else None
logger.warning('Failing measurement ' + measurementId + ' for ' + deviceId + ' because ' + str(failureReason))
if self._measurementController.failMeasurement(measurementId, deviceId, failureReason=failureReason):
logger.warning('Failed measurement ' + measurementId + ' for ' + deviceId)
return None, 200
else:
logger.error('Unable to fail measurement ' + measurementId + ' for ' + deviceId)
return None, 404 | 0.008294 |
def _process_prb_strain_genotype_view(self, limit=None):
"""
Here we fetch the free text descriptions of the phenotype associations.
Triples:
<annot_id> dc:description "description text"
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("Getting genotypes for strains")
raw = '/'.join((self.rawdir, 'prb_strain_genotype_view'))
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
if line_counter == 1:
continue
(strain_key, genotype_key) = line
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype') \
and int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is None:
strain_id = self._makeInternalIdentifier(
'strain', strain_key)
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id is None:
genotype_id = self._makeInternalIdentifier(
'genotype', genotype_key)
if strain_id is not None and genotype_id is not None:
self.strain_to_genotype_map[strain_id] = genotype_id
graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)
# TODO
# verify if this should be contingent on the exactness or not
# if qualifier == 'Exact':
# gu.addTriple(
# graph, strain_id,
# self.globaltt['has_genotype'],
# genotype_id)
# else:
# gu.addXref(graph, strain_id, genotype_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return | 0.002625 |
def adapt_single_html(html):
"""Adapts a single html document generated by
``.formatters.SingleHTMLFormatter`` to a ``models.Binder``
"""
html_root = etree.fromstring(html)
metadata = parse_metadata(html_root.xpath('//*[@data-type="metadata"]')[0])
id_ = metadata['cnx-archive-uri'] or 'book'
binder = Binder(id_, metadata=metadata)
nav_tree = parse_navigation_html_to_tree(html_root, id_)
body = html_root.xpath('//xhtml:body', namespaces=HTML_DOCUMENT_NAMESPACES)
_adapt_single_html_tree(binder, body[0], nav_tree, top_metadata=metadata)
return binder | 0.001664 |
def ricker(f, length, dt):
"""
A Ricker wavelet.
Args:
f (float): frequency in Hz, e.g. 25 Hz.
length (float): Length in s, e.g. 0.128.
dt (float): sample interval in s, e.g. 0.001.
Returns:
tuple. time basis, amplitude values.
"""
# Time basis centered on zero; int() is only needed for the sample count.
t = np.linspace(-length/2, (length-dt)/2, int(length/dt))
y = (1. - 2.*(np.pi**2)*(f**2)*(t**2))*np.exp(-(np.pi**2)*(f**2)*(t**2))
return t, y | 0.002217 |
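A quick usage sketch (numpy is already required by the function itself); the parameter values mirror the docstring examples:

import numpy as np

# Hypothetical call: a 25 Hz Ricker wavelet, 0.128 s long, sampled every 1 ms.
t, y = ricker(f=25, length=0.128, dt=0.001)
print(len(t), len(y))   # roughly length/dt samples each
print(y.max())          # close to 1.0, the peak near t = 0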
def _get_ANSI_colored_font( color ):
''' Returns an ANSI escape code (a string) that switches the font to the
given color, or None if the given color could not be associated with any
of the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
'''
color = (color.replace('-','')).lower()
#
# Bright colors:
#
if color == 'white':
return '\033[97m'
elif color in ['cyan', 'aqua']:
return '\033[96m'
elif color in ['purple', 'magneta', 'fuchsia']:
return '\033[95m'
elif color == 'blue':
return '\033[94m'
elif color in ['yellow', 'gold']:
return '\033[93m'
elif color in ['green', 'lime']:
return '\033[92m'
elif color == 'red':
return '\033[91m'
#
# Dark colors:
#
elif color in ['grey', 'gray', 'silver']:
return '\033[37m'
elif color in ['darkcyan', 'teal']:
return '\033[36m'
elif color in ['darkpurple', 'darkmagneta']:
return '\033[35m'
elif color in ['darkblue', 'navy']:
return '\033[34m'
elif color in ['darkyellow', 'olive']:
return '\033[33m'
elif color == 'darkgreen':
return '\033[32m'
elif color in ['darkred', 'maroon']:
return '\033[31m'
return None | 0.005369 |
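A short illustrative use of the returned escape code; the reset sequence '\033[0m' (a standard ANSI code, not defined above) restores the terminal's default color:

# Illustrative only: print a message in green, then reset the color.
code = _get_ANSI_colored_font('green') or ''
print(code + 'All tests passed' + '\033[0m')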
def ncores_used(self):
"""
Returns the number of cores used in this moment.
A core is used if there's a job that is running on it.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_RUN) | 0.011765 |
def get_matching_then_nonmatching_text(string_list, separator='', match_min_size=30, ignore='',
end_characters='.!\r\n'):
# type: (List[str], str, int, str, str) -> str
"""Returns a string containing matching blocks of text in a list of strings followed by non-matching.
Args:
string_list (List[str]): List of strings to match
separator (str): Separator to add between blocks of text. Defaults to ''.
match_min_size (int): Minimum block size to match on. Defaults to 30.
ignore (str): Any characters to ignore in matching. Defaults to ''.
end_characters (str): End characters to look for. Defaults to '.!\r\n'.
Returns:
str: String containing matching blocks of text followed by non-matching
"""
def add_separator_if_needed(text_list):
if separator and len(text_list) > 0 and text_list[-1][-len(separator):] != separator:
text_list.append(separator)
a = string_list[0]
for i in range(1, len(string_list)):
b = string_list[i]
combined_len = len(a) + len(b)
result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore,
end_characters=end_characters)
new_a = a
new_b = b
for text in result:
new_a = new_a.replace(text, '')
new_b = new_b.replace(text, '')
if new_a and new_a in a:
pos_a = a.index(new_a)
else:
pos_a = combined_len
if new_b and new_b in b:
pos_b = b.index(new_b)
else:
pos_b = combined_len
if pos_b > pos_a:
text_1 = new_b
pos_1 = pos_b
text_2 = new_a
pos_2 = pos_a
else:
text_1 = new_a
pos_1 = pos_a
text_2 = new_b
pos_2 = pos_b
output = list()
pos = 0
for text in result:
output.append(text)
pos += len(text)
if text_1 and pos >= pos_1:
add_separator_if_needed(output)
output.append(text_1)
pos += len(text_1)
text_1 = None
if text_2 and pos >= pos_2:
add_separator_if_needed(output)
output.append(text_2)
pos += len(text_2)
text_2 = None
if text_1 and pos_1 == combined_len:
add_separator_if_needed(output)
output.append(text_1)
if text_2 and pos_2 == combined_len:
add_separator_if_needed(output)
output.append(text_2)
a = ''.join(output)
return a | 0.002211 |
def _score(self, state, score_movement=True):
"""Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score.
"""
score = 0
max_score = 0
if state.total_weight:
# Coefficient of variance is a value between 0 and the sqrt(n)
# where n is the length of the series (the number of brokers)
# so those parameters are scaled by (1 / sqrt(# of brokers)) to
# get a value between 0 and 1.
#
# Since smaller imbalance values are preferred use 1 - x so that
# higher scores correspond to more balanced states.
score += self.args.partition_weight_cv_score_weight * \
(1 - state.broker_weight_cv / sqrt(len(state.brokers)))
score += self.args.leader_weight_cv_score_weight * \
(1 - state.broker_leader_weight_cv / sqrt(len(state.brokers)))
score += self.args.topic_broker_imbalance_score_weight * \
(1 - state.weighted_topic_broker_imbalance)
score += self.args.broker_partition_count_score_weight * \
(1 - state.broker_partition_count_cv / sqrt(len(state.brokers)))
score += self.args.broker_leader_count_score_weight * \
(1 - state.broker_leader_count_cv / sqrt(len(state.brokers)))
max_score += self.args.partition_weight_cv_score_weight
max_score += self.args.leader_weight_cv_score_weight
max_score += self.args.topic_broker_imbalance_score_weight
max_score += self.args.broker_partition_count_score_weight
max_score += self.args.broker_leader_count_score_weight
if self.args.max_movement_size is not None and score_movement:
# Avoid potential divide-by-zero error
max_movement = max(self.args.max_movement_size, 1)
score += self.args.movement_size_score_weight * \
(1 - state.movement_size / max_movement)
max_score += self.args.movement_size_score_weight
if self.args.max_leader_changes is not None and score_movement:
# Avoid potential divide-by-zero error
max_leader = max(self.args.max_leader_changes, 1)
score += self.args.leader_change_score_weight * \
(1 - state.leader_movement_count / max_leader)
max_score += self.args.leader_change_score_weight
return score / max_score | 0.001183 |
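Each balance term in the score follows the same pattern, which can be sketched with made-up numbers (a term weight of 1.0, 10 brokers and a coefficient of variation of 0.6 are assumptions for illustration):

from math import sqrt

# Made-up values: the CV is scaled by 1/sqrt(n_brokers) and inverted so that
# a more balanced state (smaller CV) yields a score term closer to 1.
weight, cv, n_brokers = 1.0, 0.6, 10
term = weight * (1 - cv / sqrt(n_brokers))
print(round(term, 2))   # ~0.81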
def create_repository(self, repository, body, params=None):
"""
Registers a shared file system repository.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A repository name
:arg body: The repository definition
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
:arg verify: Whether to verify the repository after creation
"""
for param in (repository, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_snapshot',
repository), params=params, body=body) | 0.004848 |
def files_changed():
"""
Return the list of file changed in the current branch compared to `master`
"""
with chdir(get_root()):
result = run_command('git diff --name-only master...', capture='out')
changed_files = result.stdout.splitlines()
# Remove empty lines
return [f for f in changed_files if f] | 0.002967 |
def add_template_for_node(name, node_id):
"Set the template to use to display the node"
with current_app.app_context():
db.execute(text(fetch_query_string('insert_template.sql')),
name=name, node_id=node_id)
result = db.execute(text(fetch_query_string('select_template.sql')),
name=name, node_id=node_id).fetchall()
if result:
template_id = result[0]['id']
db.execute(text(fetch_query_string('update_template_node.sql')),
template=template_id, node_id=node_id) | 0.00692 |
def guess_manifest_media_type(content):
"""
Guess the media type for the given manifest content
:param content: JSON content of manifest (bytes)
:return: media type (str), or None if unable to guess
"""
encoding = guess_json_utf(content)
try:
manifest = json.loads(content.decode(encoding))
except (ValueError, # Not valid JSON
TypeError, # Not an object
UnicodeDecodeError): # Unable to decode the bytes
logger.exception("Unable to decode JSON")
logger.debug("response content (%s): %r", encoding, content)
return None
try:
return manifest['mediaType']
except KeyError:
# no mediaType key
if manifest.get('schemaVersion') == 1:
return get_manifest_media_type('v1')
logger.warning("no mediaType or schemaVersion=1 in manifest, keys: %s",
manifest.keys()) | 0.001062 |
def get_single_int_autoincrement_colname(table_: Table) -> Optional[str]:
"""
If a table has a single integer ``AUTOINCREMENT`` column, this will
return its name; otherwise, ``None``.
- It's unlikely that a database has >1 ``AUTOINCREMENT`` field anyway, but
we should check.
- SQL Server's ``IDENTITY`` keyword is equivalent to MySQL's
``AUTOINCREMENT``.
- Verify against SQL Server:
.. code-block:: sql
SELECT table_name, column_name
FROM information_schema.columns
WHERE COLUMNPROPERTY(OBJECT_ID(table_schema + '.' + table_name),
column_name,
'IsIdentity') = 1
ORDER BY table_name;
... http://stackoverflow.com/questions/87747
- Also:
.. code-block:: sql
sp_columns 'tablename';
... which is what SQLAlchemy does (``dialects/mssql/base.py``, in
:func:`get_columns`).
"""
n_autoinc = 0
int_autoinc_names = []
for col in table_.columns:
if col.autoincrement:
n_autoinc += 1
if is_sqlatype_integer(col.type):
int_autoinc_names.append(col.name)
if n_autoinc > 1:
log.warning("Table {!r} has {} autoincrement columns",
table_.name, n_autoinc)
if n_autoinc == 1 and len(int_autoinc_names) == 1:
return int_autoinc_names[0]
return None | 0.000705 |
def config_notebook_plotting():
"""
Configure plotting functions for inline plotting within a Jupyter
Notebook shell. This function has no effect when not within a
notebook shell, and may therefore be used within a normal python
script.
"""
# Check whether running within a notebook shell and have
# not already monkey patched the plot function
from sporco.util import in_notebook
module = sys.modules[__name__]
if in_notebook() and module.plot.__name__ == 'plot':
# Set inline backend (i.e. %matplotlib inline) if in a notebook shell
set_notebook_plot_backend()
# Replace plot function with a wrapper function that discards
# its return value (within a notebook with inline plotting, plots
# are duplicated if the return value from the original function is
# not assigned to a variable)
plot_original = module.plot
def plot_wrap(*args, **kwargs):
plot_original(*args, **kwargs)
module.plot = plot_wrap
# Replace surf function with a wrapper function that discards
# its return value (see comment for plot function)
surf_original = module.surf
def surf_wrap(*args, **kwargs):
surf_original(*args, **kwargs)
module.surf = surf_wrap
# Replace contour function with a wrapper function that discards
# its return value (see comment for plot function)
contour_original = module.contour
def contour_wrap(*args, **kwargs):
contour_original(*args, **kwargs)
module.contour = contour_wrap
# Replace imview function with a wrapper function that discards
# its return value (see comment for plot function)
imview_original = module.imview
def imview_wrap(*args, **kwargs):
imview_original(*args, **kwargs)
module.imview = imview_wrap
# Disable figure show method (results in a warning if used within
# a notebook with inline plotting)
import matplotlib.figure
def show_disable(self):
pass
matplotlib.figure.Figure.show = show_disable | 0.00046 |
def create_module(self, course_id, module_name, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Create a module.
Create and return a new module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - module[name]
"""The name of the module"""
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of this module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked.
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
self.logger.debug("POST /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, single_item=True) | 0.00354 |
def use(self, middleware):
"""
Attach a middleware.
:param middleware: the middleware instance to attach
:return: the attached middleware
"""
logger.debug('use')
logger.debug(middleware)
self.middlewares.append(middleware)
di.injector.register(instance=middleware)
di.bind(middleware, auto=True)
# TODO: should use DI somehow
if check_spec(['send_text_message'], middleware):
self.chat.add_interface(middleware)
return middleware | 0.004073 |
def temp_pyfile(src, ext='.py'):
"""Make a temporary python file, return filename and filehandle.
Parameters
----------
src : string or list of strings (no need for ending newlines if list)
Source code to be written to the file.
ext : optional, string
Extension for the generated file.
Returns
-------
(filename, open filehandle)
It is the caller's responsibility to close the open file and unlink it.
"""
fname = tempfile.mkstemp(ext)[1]
f = open(fname,'w')
f.write(src)
f.flush()
return fname, f | 0.003484 |
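A usage sketch matching the docstring's note that the caller must close and unlink the file:

import os

# Hypothetical usage: write a tiny script, then clean up as the docstring requires.
fname, fh = temp_pyfile("print('hello')\n")
fh.close()        # close the handle ...
os.unlink(fname)  # ... and remove the temporary file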
def _properties_table(obj, columns=None, exclude_columns=None):
"""
Construct a `~astropy.table.QTable` of source properties from a
`SourceProperties` or `SourceCatalog` object.
Parameters
----------
obj : `SourceProperties` or `SourceCatalog` instance
The object containing the source properties.
columns : str or list of str, optional
Names of columns, in order, to include in the output
`~astropy.table.QTable`. The allowed column names are any
of the attributes of `SourceProperties`.
exclude_columns : str or list of str, optional
Names of columns to exclude from the default properties list
in the output `~astropy.table.QTable`.
Returns
-------
table : `~astropy.table.QTable`
A table of source properties with one row per source.
"""
# default properties
columns_all = ['id', 'xcentroid', 'ycentroid', 'sky_centroid',
'sky_centroid_icrs', 'source_sum', 'source_sum_err',
'background_sum', 'background_mean',
'background_at_centroid', 'xmin', 'xmax', 'ymin',
'ymax', 'min_value', 'max_value', 'minval_xpos',
'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area',
'equivalent_radius', 'perimeter',
'semimajor_axis_sigma', 'semiminor_axis_sigma',
'eccentricity', 'orientation', 'ellipticity',
'elongation', 'covar_sigx2', 'covar_sigxy',
'covar_sigy2', 'cxx', 'cxy', 'cyy']
table_columns = None
if exclude_columns is not None:
table_columns = [s for s in columns_all if s not in exclude_columns]
if columns is not None:
table_columns = np.atleast_1d(columns)
if table_columns is None:
table_columns = columns_all
tbl = QTable()
for column in table_columns:
values = getattr(obj, column)
if isinstance(obj, SourceProperties):
# turn scalar values into length-1 arrays because QTable
# column assignment requires an object with a length
values = np.atleast_1d(values)
# Unfortunately np.atleast_1d creates an array of SkyCoord
# instead of a SkyCoord array (Quantity does work correctly
# with np.atleast_1d). Here we make a SkyCoord array for
# the output table column.
if isinstance(values[0], SkyCoord):
values = SkyCoord(values) # length-1 SkyCoord array
tbl[column] = values
return tbl | 0.000386 |
def is_displayed(self):
"""
:return: False if element is not present in the DOM or invisible, otherwise True.
Ignore implicit and element timeouts and execute immediately.
To wait when element displayed or not, use ``waiter.wait_displayed`` or ``waiter.wait_not_displayed``
"""
t = self.wait_timeout
self.wait_timeout = 0
try:
return super(PageElement, self).is_displayed()
except NoSuchElementException:
return False
finally:
self.wait_timeout = t | 0.006981 |
def get_log_likelihood(inputs,data,clust):
"""Get the LL of a combined set of clusters, ignoring time series offsets.
Get the log likelihood of a cluster without worrying about the fact that
different time series are offset. It is used here mainly for cases in which
there is only one cluster to get the log likelihood of.
arguments:
inputs -- the 'X's in a list, one item per cluster
data -- the 'Y's in a list, one item per cluster
clust -- list of clusters to use
returns a tuple:
log likelihood and the offset (which is always zero for this model)
"""
S = data[0].shape[0] #number of time series
#build a new dataset from the clusters, by combining all clusters together
X = np.zeros([0,1])
Y = np.zeros([0,S])
#for each person in the cluster,
#add their inputs and data to the new dataset
for p in clust:
X = np.vstack([X,inputs[p]])
Y = np.vstack([Y,data[p].T])
#find the loglikelihood. We just add together the LL for each time series.
#ll=0
#for s in range(S):
# m = GPy.models.GPRegression(X,Y[:,s][:,None])
# m.optimize()
# ll+=m.log_likelihood()
m = GPy.models.GPRegression(X,Y)
m.optimize()
ll=m.log_likelihood()
return ll,0 | 0.019742 |
def _updateKW(image, filename, exten, skyKW, Value):
"""update the header with the kw,value"""
# Update the value in memory
image.header[skyKW] = Value
# Now update the value on disk
if isinstance(exten,tuple):
strexten = '[%s,%s]'%(exten[0],str(exten[1]))
else:
strexten = '[%s]'%(exten)
log.info('Updating keyword %s in %s' % (skyKW, filename + strexten))
fobj = fileutil.openImage(filename, mode='update', memmap=False)
fobj[exten].header[skyKW] = (Value, 'Sky value computed by AstroDrizzle')
fobj.close() | 0.008834 |
def read(self):
"""
Reads enough bytes from ``open_stream_in`` to fill the ``width``
(if available) and converts them to an ``int``. Returns this ``int``.
"""
int_ = bytes_to_int(self.open_stream_in.read(math.ceil(self.width / 8)), self.width)
self.repr_.setvalue(int_)
return self.value.getvalue() | 0.035144 |
def _search_generator(self, item: Any) -> Generator[Tuple[Any, Any], None, None]:
"""A helper method for `self.search` that returns a generator rather than a list."""
results = 0
for key, value in self.enumerate(item):
yield key, value
results += 1
if results == 0:
raise SearchError(str(item)) | 0.01105 |
def _simplify_shape(self, alist, rec=0):
"""Reduce the alist dimension if needed"""
if rec != 0:
if len(alist) == 1:
return alist[-1]
return alist
if len(alist) == 1:
return self._simplify_shape(alist[-1], 1)
return [self._simplify_shape(al, 1) for al in alist] | 0.005666 |
def set_note_footer(data, trigger):
"""
handle the footer of the note
"""
footer = ''
if data.get('link'):
provided_by = _('Provided by')
provided_from = _('from')
footer_from = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>"
footer = footer_from.format(
provided_by, trigger.trigger.description, provided_from,
data.get('link'), data.get('link'))
return footer | 0.004024 |
def needs_quotes(s):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
of the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in DOT_KEYWORDS:
return False
chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
if chars and not ID_RE_DBL_QUOTED.match(s) and not ID_RE_HTML.match(s):
return True
for test_re in [ID_RE_ALPHA_NUMS, ID_RE_NUM, ID_RE_DBL_QUOTED, ID_RE_HTML, ID_RE_ALPHA_NUMS_WITH_PORTS]:
if test_re.match(s):
return False
m = ID_RE_WITH_PORT.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True | 0.003398 |
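A few illustrative calls (the exact regular expressions are defined elsewhere in the module, so these outcomes are the expected ones rather than guaranteed):

print(needs_quotes('my node'))  # True  - contains a space, so it needs quoting
print(needs_quotes('node1'))    # False - a plain alphanumeric ID
print(needs_quotes('graph'))    # False - a reserved keyword; the caller must add quotes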
def expect_keyword(lexer: Lexer, value: str) -> Token:
"""Expect the next token to be a given keyword.
If the next token is a given keyword, return that token after advancing the lexer.
Otherwise, do not change the parser state and throw an error.
"""
token = lexer.token
if token.kind == TokenKind.NAME and token.value == value:
lexer.advance()
return token
raise GraphQLSyntaxError(
lexer.source, token.start, f"Expected {value!r}, found {token.desc}"
) | 0.003899 |
def increment_frame(self):
"""Increment a frame of the animation."""
self.current_frame += 1
if self.current_frame >= self.end_frame:
# Wrap back to the beginning of the animation.
self.current_frame = 0 | 0.007937 |
def dumps(collection: BioCCollection, pretty_print: bool = True) -> str:
"""
Serialize ``collection`` to a BioC formatted ``str``.
Args:
collection: the BioC collection
pretty_print: enables formatted XML
Returns:
a BioC formatted ``str``
"""
doc = etree.ElementTree(BioCXMLEncoder().encode(collection))
s = etree.tostring(doc, pretty_print=pretty_print, encoding=collection.encoding,
standalone=collection.standalone)
return s.decode(collection.encoding) | 0.003636 |
def _calc_ML(sampler, modelidx=0, e_range=None, e_npoints=100):
"""Get ML model from blob or compute them from chain and sampler.modelfn
"""
ML, MLp, MLerr, ML_model = find_ML(sampler, modelidx)
if e_range is not None:
# prepare bogus data for calculation
e_range = validate_array(
"e_range", u.Quantity(e_range), physical_type="energy"
)
e_unit = e_range.unit
energy = (
np.logspace(
np.log10(e_range[0].value),
np.log10(e_range[1].value),
e_npoints,
)
* e_unit
)
data = {
"energy": energy,
"flux": np.zeros(energy.shape) * sampler.data["flux"].unit,
}
modelout = sampler.modelfn(MLp, data)
if isinstance(modelout, np.ndarray):
blob = modelout
else:
blob = modelout[modelidx]
if isinstance(blob, u.Quantity):
modelx = data["energy"].copy()
model_ML = blob.copy()
elif len(blob) == 2:
modelx = blob[0].copy()
model_ML = blob[1].copy()
else:
raise TypeError("Model {0} has wrong blob format".format(modelidx))
ML_model = (modelx, model_ML)
return ML, MLp, MLerr, ML_model | 0.000758 |
def get_download_total(rows):
"""Return the total downloads, and the downloads column"""
headers = rows.pop(0)
index = headers.index('download_count')
total_downloads = sum(int(row[index]) for row in rows)
rows.insert(0, headers)
return total_downloads, index | 0.003521 |
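A self-contained sketch with made-up rows, showing that the header row is preserved and the returned index points at the download_count column:

# Hypothetical stats rows: a header row followed by data rows.
rows = [
    ['package', 'download_count'],
    ['foo', '120'],
    ['bar', '30'],
]
total, column = get_download_total(rows)
print(total, column)   # 150 1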
def add_episode(db, aid, episode):
"""Add an episode."""
values = {
'aid': aid,
'type': episode.type,
'number': episode.number,
'title': episode.title,
'length': episode.length,
}
upsert(db, 'episode', ['aid', 'type', 'number'], values) | 0.003425 |
def p_top(p):
"""
top :
| top stmt
"""
if len(p) == 1:
p[0] = node.stmt_list()
else:
p[0] = p[1]
p[0].append(p[2]) | 0.005952 |
def set_selection_strategy(self, strategy='spectral-oasis', nsel=1, neig=None):
""" Defines the column selection strategy
Parameters
----------
strategy : str
One of the following strategies to select new columns:
random : randomly choose from non-selected columns
oasis : maximal approximation error in the diagonal of :math:`A`
spectral-oasis : selects the nsel columns that are most distanced in the oASIS-error-scaled dominant eigenspace
nsel : int
number of columns to be selected in each round
neig : int or None, optional, default None
Number of eigenvalues to be optimized by the selection process.
If None, use the whole available eigenspace
"""
self._selection_strategy = selection_strategy(self, strategy, nsel, neig) | 0.004556 |
def setup_jukebox_logger():
"""Setup the jukebox top-level logger with handlers
The logger has the name ``jukebox`` and is the top-level logger for all other loggers of jukebox.
It does not propagate to the root logger, because it also has a StreamHandler and that might cause double output.
The logger default level is defined in the constants :data:`jukeboxcore.constants.DEFAULT_LOGGING_LEVEL` but can be overwritten by the environment variable \"JUKEBOX_LOG_LEVEL\"
:returns: None
:rtype: None
:raises: None
"""
log = logging.getLogger("jb")
log.propagate = False
handler = logging.StreamHandler(sys.stdout)
fmt = "%(levelname)-8s:%(name)s: %(message)s"
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
log.addHandler(handler)
level = DEFAULT_LOGGING_LEVEL
log.setLevel(level) | 0.004603 |
def read_certificate_signing_request_status(self, name, **kwargs):
"""
read status of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_certificate_signing_request_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_certificate_signing_request_status_with_http_info(name, **kwargs)
else:
(data) = self.read_certificate_signing_request_status_with_http_info(name, **kwargs)
return data | 0.004735 |
def circuit_to_instruction(circuit):
"""Build an ``Instruction`` object from a ``QuantumCircuit``.
The instruction is anonymous (not tied to a named quantum register),
and so can be inserted into another circuit. The instruction will
have the same string name as the circuit.
Args:
circuit (QuantumCircuit): the input circuit.
Return:
Instruction: an instruction equivalent to the action of the
input circuit. Upon decomposition, this instruction will
yield the components comprising the original circuit.
"""
instruction = Instruction(name=circuit.name,
num_qubits=sum([qreg.size for qreg in circuit.qregs]),
num_clbits=sum([creg.size for creg in circuit.cregs]),
params=[])
instruction.control = None
def find_bit_position(bit):
"""find the index of a given bit (Register, int) within
a flat ordered list of bits of the circuit
"""
if isinstance(bit[0], QuantumRegister):
ordered_regs = circuit.qregs
else:
ordered_regs = circuit.cregs
reg_index = ordered_regs.index(bit[0])
return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit[1]
definition = circuit.data.copy()
if instruction.num_qubits > 0:
q = QuantumRegister(instruction.num_qubits, 'q')
if instruction.num_clbits > 0:
c = ClassicalRegister(instruction.num_clbits, 'c')
definition = list(map(lambda x:
(x[0],
list(map(lambda y: (q, find_bit_position(y)), x[1])),
list(map(lambda y: (c, find_bit_position(y)), x[2]))), definition))
instruction.definition = definition
return instruction | 0.002728 |
def update(self, notification_level):
"""
Update the UserChannelInstance
:param UserChannelInstance.NotificationLevel notification_level: The push notification level to assign to the User Channel
:returns: Updated UserChannelInstance
:rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
"""
data = values.of({'NotificationLevel': notification_level, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return UserChannelInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
channel_sid=self._solution['channel_sid'],
) | 0.004988 |
def find_by_dynamic_locator(self, template_locator, variables, find_all=False, search_object=None):
'''
Find with dynamic locator
@type template_locator: webdriverwrapper.support.locator.Locator
@param template_locator: Template locator w/ formatting bits to insert
@type variables: dict
@param variables: Dictionary of variable substitutions
@type find_all: bool
@param find_all: True to find all elements immediately, False for find single element only
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with.
If null, search will be on self.driver
@rtype: webdriverwrapper.WebElementWrapper or list()
@return: Single WebElemetnWrapper if find_all is False,
list of WebElementWrappers if find_all is True
'''
template_variable_character = '%'
# raise an exception if user passed non-dictionary variables
if not isinstance(variables, dict):
raise TypeError('You must use a dictionary to populate locator variables')
# replace all variables that match the keys in 'variables' dict
locator = template_locator
for key in variables.keys():
locator = locator.replace(template_variable_character + key, variables[key])
return self.find(locator, find_all, search_object) | 0.007348 |
def to_kwargs(triangles):
"""
Convert a list of triangles to the kwargs for the Trimesh
constructor.
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
kwargs : dict
Keyword arguments for the trimesh.Trimesh constructor
Includes keys 'vertices' and 'faces'
Examples
---------
>>> mesh = trimesh.Trimesh(**trimesh.triangles.to_kwargs(triangles))
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
vertices = triangles.reshape((-1, 3))
faces = np.arange(len(vertices)).reshape((-1, 3))
kwargs = {'vertices': vertices,
'faces': faces}
return kwargs | 0.001248 |
def __update_cursor_info(self):
""" Map the mouse to the 1-d position within the line graph. """
if not self.delegate: # allow display to work without delegate
return
if self.__mouse_in and self.__last_mouse:
pos_1d = None
axes = self.__axes
line_graph_canvas_item = self.line_graph_canvas_item
if axes and axes.is_valid and line_graph_canvas_item:
mouse = self.map_to_canvas_item(self.__last_mouse, line_graph_canvas_item)
plot_rect = line_graph_canvas_item.canvas_bounds
if plot_rect.contains_point(mouse):
mouse = mouse - plot_rect.origin
x = float(mouse.x) / plot_rect.width
px = axes.drawn_left_channel + x * (axes.drawn_right_channel - axes.drawn_left_channel)
pos_1d = px,
self.delegate.cursor_changed(pos_1d) | 0.004242 |
def range(self, start=None, stop=None, months=0, days=0):
"""
Return a new query that fetches metrics within a certain date range.
```python
query.range('2014-01-01', '2014-06-30')
```
If you don't specify a `stop` argument, the date range will end today. If instead
you meant to fetch just a single day's results, try:
```python
query.range('2014-01-01', days=1)
```
More generally, you can specify that you'd like a certain number of days,
starting from a certain date:
```python
query.range('2014-01-01', months=3)
query.range('2014-01-01', days=28)
```
Note that if you don't specify a granularity (either through the `interval`
method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly`
shortcut methods) you will get only a single result, encompassing the
entire date range, per metric.
**Note:** it is currently not possible to easily specify that you'd like
to query the last last full week(s), month(s) et cetera.
This will be added sometime in the future.
"""
start, stop = utils.date.range(start, stop, months, days)
self.raw.update({
'start_date': start,
'end_date': stop,
})
return self | 0.005106 |
def over(self, state, fn):
# type: (S, Callable[[A], B]) -> T
'''Applies a function `fn` to all the foci within `state`.
Requires kind Setter. This method will raise TypeError when the
optic has no way to set foci.
'''
if not self._is_kind(Setter):
raise TypeError('Must be an instance of Setter to .over()')
pure = lambda a: Identity(a)
func = lambda a: Identity(fn(a))
return self.apply(func, pure, state).unwrap() | 0.00996 |
def write_journal(self, journal_file_path):
"""Write the constructed journal in to the provided file.
Args:
journal_file_path (str): full path to output journal file
"""
# TODO: assert the extension is txt and not other
with open(journal_file_path, "w") as jrn_file:
jrn_file.write(self._journal_contents) | 0.005405 |
def cookie_get(self, name):
"""
Check for a cookie value by name.
:param str name: Name of the cookie value to retrieve.
:return: Returns the cookie value if it's set or None if it's not found.
"""
if not hasattr(self, 'cookies'):
return None
if self.cookies.get(name):
return self.cookies.get(name).value
return None | 0.035294 |
def get_output(self):
"""Get the output of a reading job as a list of filenames."""
logger.info("Getting outputs.")
# Get the set of prefixes (each will correspond to three json files.)
json_files = glob.glob(path.join(self.output_dir, '*.json'))
json_prefixes = set()
for json_file in json_files:
# Remove .uaz.<subfile type>.json
prefix = '.'.join(path.basename(json_file).split('.')[:-3])
json_prefixes.add(path.join(self.output_dir, prefix))
# Join each set of json files and store the json dict.
for prefix in json_prefixes:
base_prefix = path.basename(prefix)
if base_prefix.isdecimal():
base_prefix = int(base_prefix)
elif base_prefix in self.id_maps.keys():
base_prefix = self.id_maps[base_prefix]
try:
content = self._join_json_files(prefix, clear=True)
except Exception as e:
logger.exception(e)
logger.error("Could not load result for prefix %s." % prefix)
content = None
self.add_result(base_prefix, content)
logger.debug('Joined files for prefix %s.' % base_prefix)
return self.results | 0.001554 |
def create_schema(self, hash_key_name, hash_key_proto_value,
range_key_name=None, range_key_proto_value=None):
"""
Create a Schema object used when creating a Table.
:type hash_key_name: str
:param hash_key_name: The name of the HashKey for the schema.
:type hash_key_proto_value: int|long|float|str|unicode
:param hash_key_proto_value: A sample or prototype of the type
of value you want to use for the HashKey.
:type range_key_name: str
:param range_key_name: The name of the RangeKey for the schema.
This parameter is optional.
:type range_key_proto_value: int|long|float|str|unicode
:param range_key_proto_value: A sample or prototype of the type
of value you want to use for the RangeKey. This parameter
is optional.
"""
schema = {}
hash_key = {}
hash_key['AttributeName'] = hash_key_name
hash_key_type = self.get_dynamodb_type(hash_key_proto_value)
hash_key['AttributeType'] = hash_key_type
schema['HashKeyElement'] = hash_key
if range_key_name and range_key_proto_value is not None:
range_key = {}
range_key['AttributeName'] = range_key_name
range_key_type = self.get_dynamodb_type(range_key_proto_value)
range_key['AttributeType'] = range_key_type
schema['RangeKeyElement'] = range_key
return Schema(schema) | 0.002642 |
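An illustrative call (conn stands for an instance of the class this method belongs to; the key names and prototype values are made up):

# Hypothetical: a string hash key plus a numeric range key. The prototype
# values are only used to infer the DynamoDB attribute types.
schema = conn.create_schema(
    hash_key_name='forum_name',
    hash_key_proto_value='',
    range_key_name='post_time',
    range_key_proto_value=1,
)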
async def list_vms(self, preset_name):
'''
List VMs by preset name
:arg present_name: string
'''
response = await self.nova.servers.list(name=f'^{preset_name}$')
result = []
for server in response['servers']:
result.append(self._map_vm_structure(server))
return result | 0.005797 |
def parse_dirname(fc_dir):
"""Parse the flow cell ID and date from a flow cell directory.
"""
(_, fc_dir) = os.path.split(fc_dir)
parts = fc_dir.split("_")
name = None
date = None
for p in parts:
if p.endswith(("XX", "xx", "XY", "X2")):
name = p
elif len(p) == 6:
try:
int(p)
date = p
except ValueError:
pass
if name is None or date is None:
raise ValueError("Did not find flowcell name: %s" % fc_dir)
return name, date | 0.001776 |
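An illustrative call with a made-up run folder name in the usual <date>_<machine>_<run>_<flowcell> layout:

# Hypothetical directory name: the date is the 6-digit part and the flowcell
# ID is the part ending in 'XX'.
name, date = parse_dirname('/data/110106_SN123_0123_AB06GAAXX')
print(name, date)   # AB06GAAXX 110106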
def in_span(loc: int, span: Span) -> bool:
"""Checks if loc is inside span"""
if loc >= span[0] and loc <= span[1]:
return True
else:
return False | 0.005714 |
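Two quick illustrative calls:

print(in_span(5, (3, 10)))   # True  - 3 <= 5 <= 10
print(in_span(11, (3, 10)))  # False - past the end of the span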
def languages(self):
"""Languages.
Returns a set of languages in lower-case.
:return:
Returns a set of languages in lower-case (strings).
"""
result = set()
languages = self._safe_get_element('ItemAttributes.Languages')
if languages is not None:
for language in languages.iterchildren():
text = self._safe_get_element_text('Name', language)
if text:
result.add(text.lower())
return result | 0.003774 |
def _render_closure(self):
'''Use a closure so that draw attributes can be saved'''
fillcolor = self.fill
strokecolor = self.stroke
strokewidth = self.strokewidth
def _render(cairo_ctx):
'''
At the moment this is based on cairo.
TODO: Need to work out how to move the cairo specific
bits somewhere else.
'''
# Go to initial point (CORNER or CENTER):
transform = self._call_transform_mode(self._transform)
if fillcolor is None and strokecolor is None:
# Fixes _bug_FillStrokeNofillNostroke.bot
return
cairo_ctx.set_matrix(transform)
# Run the path commands on the cairo context:
self._traverse(cairo_ctx)
# Matrix affects stroke, so we need to reset it:
cairo_ctx.set_matrix(cairo.Matrix())
if fillcolor is not None and strokecolor is not None:
if strokecolor[3] < 1:
# Draw onto intermediate surface so that stroke
# does not overlay fill
cairo_ctx.push_group()
cairo_ctx.set_source_rgba(*fillcolor)
cairo_ctx.fill_preserve()
e = cairo_ctx.stroke_extents()
cairo_ctx.set_source_rgba(*strokecolor)
cairo_ctx.set_operator(cairo.OPERATOR_SOURCE)
cairo_ctx.set_line_width(strokewidth)
cairo_ctx.stroke()
cairo_ctx.pop_group_to_source()
cairo_ctx.paint()
else:
# Fast path if no alpha in stroke
cairo_ctx.set_source_rgba(*fillcolor)
cairo_ctx.fill_preserve()
cairo_ctx.set_source_rgba(*strokecolor)
cairo_ctx.set_line_width(strokewidth)
cairo_ctx.stroke()
elif fillcolor is not None:
cairo_ctx.set_source_rgba(*fillcolor)
cairo_ctx.fill()
elif strokecolor is not None:
cairo_ctx.set_source_rgba(*strokecolor)
cairo_ctx.set_line_width(strokewidth)
cairo_ctx.stroke()
return _render | 0.000857 |
def mount(self, volume):
"""Mounts the given volume on the provided mountpoint. The default implementation simply calls mount.
:param Volume volume: The volume to be mounted
:param mountpoint: The file system path to mount the filesystem on.
:raises UnsupportedFilesystemError: when the volume system type can not be mounted.
"""
volume._make_mountpoint()
try:
self._call_mount(volume, volume.mountpoint, self._mount_type or self.type, self._mount_opts)
except Exception:
# undo the creation of the mountpoint
volume._clear_mountpoint()
raise | 0.007622 |
def get_server(key, server=MAIN_SERVER, servers=LOAD_SERVERS):
""" given a key, get the IP address of the server that has the pvt key that
owns the name/key
"""
namecoind = NamecoindClient(NAMECOIND_SERVER, NAMECOIND_PORT,
NAMECOIND_USER, NAMECOIND_PASSWD)
info = namecoind.name_show(key)
if 'address' in info:
return check_address(info['address'], server, servers)
response = {}
response["registered"] = False
response["server"] = None
response["ismine"] = False
return response | 0.001748 |
def revision(self):
"""The name of the feature branch (a string)."""
location, _, revision = self.expression.partition('#')
return revision if location and revision else self.expression | 0.009569 |
def __deleteOutputCache(self, modelID):
"""
Deletes the output cache associated with the given modelID. This actually
clears up the resources associated with the cache, rather than deleting all
the records in the cache
Parameters:
-----------------------------------------------------------------------
modelID: The id of the model whose output cache is being deleted
"""
# If this is our output, we should close the connection
if modelID == self._modelID and self._predictionLogger is not None:
self._predictionLogger.close()
del self.__predictionCache
self._predictionLogger = None
self.__predictionCache = None | 0.007299 |
def register_adapter(from_classes, to_classes, func):
"""
Register a function that can handle adapting from `from_classes` to `to_classes`.
"""
assert from_classes, 'Must supply classes to adapt from'
assert to_classes, 'Must supply classes to adapt to'
assert func, 'Must supply adapter function'
if not isinstance(from_classes, (tuple, list)):
from_classes = [from_classes]
if not isinstance(to_classes, (tuple, list)):
to_classes = [to_classes]
for key in itertools.product(from_classes, to_classes):
if key in __adapters__:
raise AdapterExists('%r to %r already exists.' % key)
__adapters__[key] = func | 0.002907 |
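A minimal illustrative registration (the adapter function here is made up); registering the same (from, to) pair twice raises AdapterExists:

# Hypothetical adapter: convert plain strings to integers.
register_adapter(str, int, lambda value: int(value))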
def get_all_longest_col_lengths(self):
"""
iterate over all columns and get their longest values
:return: dict, {"column_name": 132}
"""
response = {}
for col in self.col_list:
response[col] = self._longest_val_in_column(col)
return response | 0.006452 |
def refresh(self, index=None):
"""Refresh tabwidget"""
if index is None:
index = self.get_stack_index()
# Set current editor
if self.get_stack_count():
index = self.get_stack_index()
finfo = self.data[index]
editor = finfo.editor
editor.setFocus()
self._refresh_outlineexplorer(index, update=False)
self.__refresh_statusbar(index)
self.__refresh_readonly(index)
self.__check_file_status(index)
self.__modify_stack_title()
self.update_plugin_title.emit()
else:
editor = None
# Update the modification-state-dependent parameters
self.modification_changed()
# Update FindReplace binding
self.find_widget.set_editor(editor, refresh=False) | 0.002296 |
def _handle_shift(self, other: Union[int, "BitVec"], operator: Callable) -> "BitVec":
"""
Handles shift
:param other: The other BitVector
:param operator: The shift operator
:return: the resulting output
"""
if isinstance(other, BitVecFunc):
return operator(other, self)
if not isinstance(other, BitVec):
return BitVec(
operator(self.raw, other), annotations=self.annotations
)
union = self.annotations + other.annotations
return BitVec(operator(self.raw, other.raw), annotations=union) | 0.004854 |
def plot_final(self, ax):
'''
Plots the final de-trended light curve.
'''
# Plot the light curve
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
def M(x): return np.delete(x, bnmask)
if (self.cadence == 'lc') or (len(self.time) < 4000):
ax.plot(M(self.time), M(self.flux), ls='none',
marker='.', color='k', markersize=2, alpha=0.3)
else:
ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
color='k', markersize=2, alpha=0.03, zorder=-1)
ax.set_rasterization_zorder(0)
# Hack: Plot invisible first and last points to ensure
# the x axis limits are the
# same in the other plots, where we also plot outliers!
ax.plot(self.time[0], np.nanmedian(M(self.flux)), marker='.', alpha=0)
ax.plot(self.time[-1], np.nanmedian(M(self.flux)), marker='.', alpha=0)
# Plot the GP (long cadence only)
if self.cadence == 'lc':
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(self.apply_mask(self.time),
self.apply_mask(self.fraw_err))
med = np.nanmedian(self.apply_mask(self.flux))
y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
y += med
ax.plot(M(self.time), M(y), 'r-', lw=0.5, alpha=0.5)
# Compute the CDPP of the GP-detrended flux
self.cdppg = self._mission.CDPP(self.apply_mask(
self.flux - y + med), cadence=self.cadence)
else:
# We're not going to calculate this
self.cdppg = 0.
# Appearance
ax.annotate('Final', xy=(0.98, 0.025), xycoords='axes fraction',
ha='right', va='bottom', fontsize=10, alpha=0.5,
fontweight='bold')
ax.margins(0.01, 0.1)
# Get y lims that bound 99% of the flux
flux = np.delete(self.flux, bnmask)
N = int(0.995 * len(flux))
hi, lo = flux[np.argsort(flux)][[N, -N]]
fsort = flux[np.argsort(flux)]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
ax.get_yaxis().set_major_formatter(Formatter.Flux) | 0.000859 |
def register_proper_name(self, name):
"""Registers a proper name to the database."""
with self.proper_names_db_path.open("a") as f:
f.write(u"{0}\n".format(name)) | 0.010526 |
def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
"""Run SMC algorithms in parallel, for different combinations of parameters.
`multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
A basic usage is::
results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)
This runs the same SMC algorithm 20 times, using all available CPU cores.
The output, ``results``, is a list of 20 dictionaries; a given dict corresponds
to a single run, and contains the following (key, value) pairs:
+ ``'run'``: a run identifier (a number between 0 and nruns-1)
+ ``'output'``: the corresponding SMC object (once method run was completed)
Since a `SMC` object may take a lot of space in memory (especially when
the option ``store_history`` is set to True), it is possible to require
`multiSMC` to store only some chosen summary of the SMC runs, using option
`out_func`. For instance, if we only want to store the estimate
of the log-likelihood of the model obtained from each particle filter::
of = lambda pf: pf.logLt
results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)
It is also possible to vary the parameters. Say::
results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])
will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
N=500, and 10 times for N=1000. The number 10 comes from the fact that we
did not specify nruns, and its default value is 10. The 30 dictionaries
obtained in results will then contain an extra (key, value) pair that will
give the value of N for which the run was performed.
It is possible to vary several arguments. Each time a list must be
provided. The end result will amount to take a *cartesian product* of the
arguments::
results = multiSMC(fk=my_fk_model, N=[100, 1000], resampling=['multinomial',
'residual'], nruns=20)
In that case we run our algorithm 80 times: 20 times with N=100 and
resampling set to multinomial, 20 times with N=100 and resampling set to
residual and so on.
Parameters
----------
* nruns: int, optional
number of runs (default is 10)
* nprocs: int, optional
number of processors to use; if negative, number of cores not to use.
Default value is 1 (no multiprocessing)
* out_func: callable, optional
function to transform the output of each SMC run. (If not given, output
will be the complete SMC object).
* args: dict
arguments passed to SMC class
Returns
-------
A list of dicts
See also
--------
`utils.multiplexer`: for more details on the syntax.
"""
def f(**args):
pf = SMC(**args)
pf.run()
return out_func(pf)
if out_func is None:
out_func = lambda x: x
return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
**args) | 0.008262 |
def parse_recipients(header, reference_id=None):
"""\
Returns the recipients of the cable as a (possibly empty) list.
"""
m = _TO_PATTERN.search(header)
if not m:
if reference_id and reference_id not in _CABLES_WITHOUT_TO:
logger.warn('No TO header found in "%s", header: "%s"' % (reference_id, header))
return []
to_header = m.group(1)
return _route_recipient_from_header(to_header, reference_id) | 0.004464 |
def from_entity(entity, self_user_id):
"""Construct user from ``Entity`` message.
Args:
entity: ``Entity`` message.
self_user_id (~hangups.user.UserID or None): The ID of the current
user. If ``None``, assume ``entity`` is the current user.
Returns:
:class:`~hangups.user.User` object.
"""
user_id = UserID(chat_id=entity.id.chat_id,
gaia_id=entity.id.gaia_id)
return User(user_id, entity.properties.display_name,
entity.properties.first_name,
entity.properties.photo_url,
entity.properties.email,
(self_user_id == user_id) or (self_user_id is None)) | 0.002642 |
def need_record_permission(factory_name):
"""Decorator checking that the user has the required permissions on record.
:param factory_name: name of the permission factory.
"""
def need_record_permission_builder(f):
@wraps(f)
def need_record_permission_decorator(self, record=None, *args,
**kwargs):
permission_factory = (
getattr(self, factory_name) or
getattr(current_records_rest, factory_name)
)
# FIXME use context instead
request._methodview = self
if permission_factory:
verify_record_permission(permission_factory, record)
return f(self, record=record, *args, **kwargs)
return need_record_permission_decorator
return need_record_permission_builder | 0.001156 |
def mskWshape(W, cri):
"""Get appropriate internal shape (see
:class:`CSC_ConvRepIndexing` and :class:`CDU_ConvRepIndexing`) for
data fidelity term mask array `W`. The external shape of `W`
depends on the external shape of input data array `S`. The
simplest criterion for ensuring that the external `W` is
compatible with `S` is to ensure that `W` has the same shape as
`S`, except that non-singleton dimensions in `S` may be singleton
dimensions in `W`. If `W` has a single non-spatial axis, it is
assigned as a channel or multi-signal axis depending on the
    corresponding assignment in `S`.
Parameters
----------
W : array_like
Data fidelity term weight/mask array
cri : :class:`CSC_ConvRepIndexing` object or :class:`CDU_ConvRepIndexing`\
object
Object specifying convolutional representation dimensions
Returns
-------
shp : tuple
Appropriate internal mask array shape
"""
# Number of axes in W available for C and/or K axes
ckdim = W.ndim - cri.dimN
if ckdim >= 2:
# Both C and K axes are present in W
shpW = W.shape + (1,) if ckdim == 2 else W.shape
elif ckdim == 1:
# Exactly one of C or K axes is present in W
if cri.C == 1 and cri.K > 1:
# Input S has a single channel and multiple signals
shpW = W.shape[0:cri.dimN] + (1, W.shape[cri.dimN]) + (1,)
elif cri.C > 1 and cri.K == 1:
# Input S has multiple channels and a single signal
shpW = W.shape[0:cri.dimN] + (W.shape[cri.dimN], 1) + (1,)
else:
# Input S has multiple channels and signals: resolve ambiguity
# by taking extra axis in W as a channel axis
shpW = W.shape[0:cri.dimN] + (W.shape[cri.dimN], 1) + (1,)
else:
# Neither C nor K axis is present in W
shpW = W.shape + (1,) * (3 - ckdim)
return shpW | 0.000516 |
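An illustrative call with a stand-in for `cri` (real code would pass a CSC_ConvRepIndexing or CDU_ConvRepIndexing instance; only the attributes read above are mimicked):

from types import SimpleNamespace
import numpy as np

cri = SimpleNamespace(dimN=2, C=1, K=3)   # two spatial dims, 1 channel, 3 signals
W = np.ones((64, 64, 3))                  # one spatial mask per signal
print(mskWshape(W, cri))                  # -> (64, 64, 1, 3, 1)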
def _extract_lookup(self, key):
"""Extract lookup method based on key name format"""
parts = key.split('__')
# 'exact' is the default lookup if there was no explicit comparison op in `key`
# Assume there is only one `__` in the key.
# FIXME Change for child attribute query support
op = 'exact' if len(parts) == 1 else parts[1]
# Construct and assign the lookup class as a filter criteria
return parts[0], self.get_lookup(op) | 0.006073 |
def set(self, value, metadata=dict(), content_type=None):
"""Sets the key to the given value."""
return self._boto_object.put(Body=value, Metadata=metadata, ContentType=content_type) | 0.015152 |
def email_address(self, address, owner=None, **kwargs):
"""
Create the Email Address TI object.
        Args:
            address: The email address value.
            owner: The owner to create the object under (optional).
            **kwargs: Additional keyword arguments passed through to EmailAddress.
        Return:
            EmailAddress: The Email Address TI object.
"""
return EmailAddress(self.tcex, address, owner=owner, **kwargs) | 0.00692 |
def addex(extype, exmsg, condition=None, edata=None):
r"""
Add an exception in the global exception handler.
:param extype: Exception type; *must* be derived from the `Exception
<https://docs.python.org/2/library/exceptions.html#
exceptions.Exception>`_ class
:type extype: Exception type object, i.e. RuntimeError, TypeError,
etc.
:param exmsg: Exception message; it can contain fields to be replaced
when the exception is raised via
:py:meth:`pexdoc.ExHandle.raise_exception_if`.
A field starts with the characters :code:`'\*['` and
ends with the characters :code:`']\*'`, the field name
follows the same rules as variable names and is between
these two sets of characters. For example,
:code:`'\*[fname]\*'` defines the fname field
:type exmsg: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*. If None the
                      flag is not used and no exception is raised
:type condition: boolean or None
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:rtype: (if condition is not given or None) function
:raises:
* RuntimeError (Argument \`condition\` is not valid)
* RuntimeError (Argument \`edata\` is not valid)
* RuntimeError (Argument \`exmsg\` is not valid)
* RuntimeError (Argument \`extype\` is not valid)
"""
return _ExObj(extype, exmsg, condition, edata).craise | 0.000461 |
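A usage sketch; the call convention of the returned callable (a condition, plus optional field data) is inferred from the docstring above rather than verified against the library:

import pexdoc

def my_func(name):
    # Register the exception; with no condition given, addex returns a callable
    # that raises only when later invoked with a truthy condition.
    exobj = pexdoc.addex(TypeError, 'Argument `name` is not valid')
    exobj(not isinstance(name, str))
    return 'Hello {0}'.format(name)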
def stratHeun(f, G, y0, tspan, dW=None):
"""Use the Stratonovich Heun algorithm to integrate Stratonovich equation
dy = f(y,t)dt + G(y,t) \circ dW(t)
where y is the d-dimensional state vector, f is a vector-valued function,
G is an d x m matrix-valued function giving the noise coefficients and
dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments
Args:
f: callable(y, t) returning (d,) array
Vector-valued function to define the deterministic part of the system
G: callable(y, t) returning (d,m) array
Matrix-valued function to define the noise coefficients of the system
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
dW: optional array of shape (len(tspan)-1, d). This is for advanced use,
if you want to use a specific realization of the d independent Wiener
processes. If not provided Wiener increments will be generated randomly
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError
See also:
W. Rumelin (1982) Numerical Treatment of Stochastic Differential
Equations
R. Mannella (2002) Integration of Stochastic Differential Equations
on a Computer
K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for strong
solutions of stochastic differential equations: an overview
"""
(d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
N = len(tspan)
h = (tspan[N-1] - tspan[0])/(N - 1)
# allocate space for result
y = np.zeros((N, d), dtype=type(y0[0]))
if dW is None:
# pre-generate Wiener increments (for m independent Wiener processes):
dW = deltaW(N - 1, m, h)
    y[0] = y0
for n in range(0, N-1):
tn = tspan[n]
tnp1 = tspan[n+1]
yn = y[n]
dWn = dW[n,:]
fn = f(yn, tn)
Gn = G(yn, tn)
ybar = yn + fn*h + Gn.dot(dWn)
fnbar = f(ybar, tnp1)
Gnbar = G(ybar, tnp1)
y[n+1] = yn + 0.5*(fn + fnbar)*h + 0.5*(Gn + Gnbar).dot(dWn)
return y | 0.00168 |
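A worked example under stated assumptions: integrating scalar geometric Brownian motion dy = mu*y dt + sigma*y ∘ dW with this integrator (model and parameter values are illustrative only):

import numpy as np

mu, sigma = 0.5, 0.2

def f(y, t):
    return mu * y                       # drift, shape (1,)

def G(y, t):
    return np.array([[sigma * y[0]]])   # diffusion, shape (1, 1)

y0 = np.array([1.0])
tspan = np.arange(0.0, 1.0, 0.001)      # equally spaced, as required
y = stratHeun(f, G, y0, tspan)
print(y[-1])                            # approximate state at t = 1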
async def wait_event(self, event, *, timeout=None):
"""
Waits for a custom event to occur. Timeouts still apply.
Unless you're certain that your code will run fast enough,
generally you should get a "handle" of this special coroutine
before acting. Generally, you should do this:
>>> from telethon import TelegramClient, events
>>>
>>> client = TelegramClient(...)
>>>
>>> async def main():
>>> async with client.conversation(...) as conv:
>>> response = conv.wait_event(events.NewMessage(incoming=True))
>>> await conv.send_message('Hi')
>>> response = await response
This way your event can be registered before acting,
since the response may arrive before your event was
registered. It depends on your use case since this
also means the event can arrive before you send
a previous action.
"""
start_time = time.time()
if isinstance(event, type):
event = event()
await event.resolve(self._client)
counter = Conversation._custom_counter
Conversation._custom_counter += 1
future = asyncio.Future(loop=self._client.loop)
# We need the `async def` here because we want to block on the future
# from `_get_result` by using `await` on it. If we returned the future
# immediately we would `del` from `_custom` too early.
async def result():
try:
return await self._get_result(future, start_time, timeout)
finally:
del self._custom[counter]
self._custom[counter] = (event, future)
return await result() | 0.001707 |
def _get_omimtype(entry, globaltt):
"""
    (note: there is an alternative using mimTitle in omia)
    Here, we look at the omim 'prefix' to help type the entry.
For now, we only classify omim entries as genes;
the rest we leave alone.
:param entry:
:return:
"""
# An asterisk (*) before an entry number indicates a gene.
# A number symbol (#) before an entry number indicates
# that it is a descriptive entry, usually of a phenotype,
# and does not represent a unique locus.
# The reason for the use of the number symbol
# is given in the first paragraph of the entry.
# Discussion of any gene(s) related to the phenotype resides in
# another entry(ies) as described in the first paragraph.
#
# A plus sign (+) before an entry number indicates that the
# entry contains the description of a gene of
# known sequence and a phenotype.
#
# A percent sign (%) before an entry number indicates that the
# entry describes a confirmed mendelian phenotype or phenotypic locus
# for which the underlying molecular basis is not known.
#
# No symbol before an entry number generally indicates a
# description of a phenotype for which the mendelian basis,
# although suspected, has not been clearly established
# or that the separateness of this phenotype
# from that in another entry is unclear.
#
# A caret (^) before an entry number means the
# entry no longer exists because it was removed from the database
# or moved to another entry as indicated.
prefix = None
type_id = None
if 'prefix' in entry:
prefix = entry['prefix']
if prefix == '*':
# gene, may not have a known sequence or a phenotype
# note that some genes are also phenotypes,
# even in this class, like 102480
# examples: 102560,102480,100678,102750
type_id = globaltt['gene']
elif prefix == '#':
# phenotype/disease -- indicate that here?
# examples: 104200,105400,114480,115300,121900
# type_id = globaltt['Phenotype'] # 'UPHENO_0001001' # species agnostic
# type_id = globaltt['human phenotypic abnormality']
pass
elif prefix == '+':
# gene of known sequence and has a phenotype
# examples: 107670,110600,126453
type_id = globaltt['gene'] # doublecheck this
elif prefix == '%':
# this is a disease (with a known locus).
# examples include: 102150,104000,107200,100070
type_id = globaltt['heritable_phenotypic_marker']
elif prefix == '':
# this is probably just a phenotype
pass
return type_id | 0.001025 |
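Illustrative calls with a hypothetical translation table (the CURIE values below are placeholders, not the project's real `globaltt` entries):

globaltt = {'gene': 'SO:0000704',
            'heritable_phenotypic_marker': 'SO:0001500'}
print(_get_omimtype({'prefix': '*'}, globaltt))  # gene entry        -> 'SO:0000704'
print(_get_omimtype({'prefix': '%'}, globaltt))  # phenotypic locus  -> 'SO:0001500'
print(_get_omimtype({'prefix': '#'}, globaltt))  # phenotype entry   -> None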
def datacenter(self, name):
"""
:param name: location key
:type name: :py:class:`basestring`
:Returns: a new DataCenter object
This method treats the 'name' argument as a location key (on the
`known_locations` attribute dict) or FQDN, and keeps existing
authentication and other configuration from this object.
"""
# The base form of this, as below, simply sets up a redirect.
# j, _ = self.request('GET', 'datacenters/' + str(name))
if name not in self.known_locations and '.' not in name:
self.datacenters()
dc = DataCenter(location=name, headers=self.default_headers,
login=self.login, verbose=self.verbose,
verify=self.verify, known_locations=self.known_locations)
dc.auth = self.auth
return dc | 0.012543 |
def set_size(self, size):
""" Set the size of the map in pixels
This is an expensive operation, do only when absolutely needed.
:param size: (width, height) pixel size of camera/view of the group
"""
buffer_size = self._calculate_zoom_buffer_size(size, self._zoom_level)
self._size = size
self._initialize_buffers(buffer_size) | 0.005208 |
def main():
""" main entry """
options = parse(sys.argv[1:], CLIRULES, ".splunkrc")
if options.kwargs['omode'] not in OUTPUT_MODES:
print("output mode must be one of %s, found %s" % (OUTPUT_MODES,
options.kwargs['omode']))
sys.exit(1)
service = connect(**options.kwargs)
if path.exists(options.kwargs['output']):
if not options.kwargs['recover']:
print("Export file %s exists, and recover option nor specified" % \
options.kwargs['output'])
sys.exit(1)
else:
options.kwargs['end'] = recover(options)
options.kwargs['fixtail'] = True
openmode = "a"
else:
openmode = "w"
options.kwargs['fixtail'] = False
try:
options.kwargs['fd'] = open(options.kwargs['output'], openmode)
except IOError:
print("Failed to open output file %s w/ mode %s" % \
(options.kwargs['output'], openmode))
sys.exit(1)
export(options, service) | 0.00473 |
def get_changes(self, serialized=False, keep=False):
""" Get a journal of changes that have occurred
:param `serialized`:
Return changes in the serialized format used by TaskWarrior.
        :param `keep`:
By default, the list of changes is reset after running
``.get_changes``; set this to `True` if you would like to
keep the changes recorded following running this command.
:returns: A dictionary of 2-tuples of changes, where the key is the
name of the field that has changed, and the value is a 2-tuple
containing the original value and the final value respectively.
"""
results = {}
# Check for explicitly-registered changes
for k, f, t in self._changes:
if k not in results:
results[k] = [f, None]
results[k][1] = (
self._serialize(k, t, self._fields)
if serialized else t
)
# Check for changes on subordinate items
for k, v in six.iteritems(self):
if isinstance(v, Dirtyable):
result = v.get_changes(keep=keep)
if result:
                    if k not in results:
results[k] = [result[0], None]
results[k][1] = (
self._serialize(k, result[1], self._fields)
if serialized else result[1]
)
# Clear out recorded changes
if not keep:
self._changes = []
return results | 0.001869 |
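A hedged sketch of the expected behaviour (assuming `task` is a Dirtyable-backed mapping whose field changes are being tracked; the values shown are illustrative):

task['priority'] = 'H'                      # recorded by the change-tracking machinery
print(task.get_changes(keep=True))          # e.g. {'priority': ['L', 'H']} (original, final)
print(task.get_changes(serialized=True))    # same changes with TaskWarrior-serialized values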
def CreateAdGroup(client, campaign_id):
  Creates an ad group for a dynamic remarketing campaign.
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created.
"""
ad_group_service = client.GetService('AdGroupService', 'v201809')
ad_group = {
'name': 'Dynamic remarketing ad group',
'campaignId': campaign_id,
'status': 'ENABLED'
}
operations = [{
'operator': 'ADD',
'operand': ad_group
}]
return ad_group_service.mutate(operations)['value'][0] | 0.010695 |
def get_package_version():
"""returns package version without importing it"""
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, "firepit/__init__.py")) as pkg:
for line in pkg:
m = version.match(line.strip())
if not m:
continue
return ".".join(m.groups()[0].split(", ")) | 0.002695 |
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} --no-environ -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError as e:
return None
for line in output.decode().split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None | 0.003096 |
def calculateLocalElasticitySegments(self, bp, span=2, frameGap=None, helical=False, unit='kT',
err_type='block', tool='gmx analyze', outFile=None):
"""Calculate local elastic properties of consecutive overlapped DNA segments
Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`.
Parameters
----------
bp : list
List of two base-steps forming the global DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
span : int
Length of overlapping (local) DNA segments. It should be less than four.
frameGap : int
How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
err_type : str
Error estimation by autocorrelation method ``err_type='acf'`` or
block averaging method ``err_type='block'``
tool : str
GROMACS tool to calculate error. In older versions it is `g_analyze` while in
newer versions (above 2016) it is `gmx analyze`.
outFile : str
Output file in csv format.
Returns
-------
segments : list
list of DNA segments for which local elastic properties was calculated.
elasticities : OrderedDict
            An ordered dictionary of 1D arrays of shape (segments). The keys in the dictionary are the names of the elasticities in
the same order as listed above.
error : OrderedDict
            An ordered dictionary of 1D arrays of shape (segments). The keys in the dictionary are the names of the elasticities in
            the same order as listed above.
"""
if helical:
props_name = helical_local_props_vector
else:
props_name = local_props_vector
segments, errors, elasticities = [], OrderedDict(), OrderedDict()
for name in props_name:
elasticities[name] = []
errors[name] = []
for s in range(bp[0], bp[1]):
if s+span-1 > bp[1]:
break
time, elasticity_t = self.getLocalElasticityByTime([s, s+span-1], frameGap=frameGap, helical=helical, unit=unit)
error_t = dnaMD.get_error(time, list(elasticity_t.values()), len(props_name), err_type=err_type, tool=tool)
for i in range(len(props_name)):
esy_t = elasticity_t[props_name[i]][-1] # only take last entry
elasticities[props_name[i]].append(esy_t)
errors[props_name[i]].append(error_t[i])
segments.append('{0}-{1}'.format(s, s+span-1))
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#bps')
for name in props_name:
fout.write(', {0}, {0}-error'.format(name))
fout.write('\n')
for s in range(len(segments)):
fout.write('{0}'.format(segments[s]))
for name in props_name:
fout.write(', {0:.5f}, {1:.5f}'.format(elasticities[name][s], errors[name][s]))
fout.write('\n')
return segments, elasticities, errors | 0.005741 |
def _do_put(self):
"""
HTTP Put Request
"""
return requests.put(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token)) | 0.016393 |
def handle_key_rotate(self, now):
'''
Rotate the AES key rotation
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts) | 0.003359 |
def patched_function(self, *args, **kwargs):
"""
Step 3. Wrapped function calling.
"""
result = self.function(*args, **kwargs)
self.validate(result)
return result | 0.009524 |
def set(self, val):
"""Set the heat set point."""
msg = ExtendedSend(
address=self._address,
commandtuple=COMMAND_THERMOSTAT_SET_HEAT_SETPOINT_0X6D_NONE,
cmd2=int(val * 2),
userdata=Userdata())
msg.set_checksum()
self._send_method(msg, self._set_heat_point_ack) | 0.005865 |
def get_all_network_interfaces(self, filters=None):
"""
Retrieve all of the Elastic Network Interfaces (ENI's)
associated with your account.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
"""
params = {}
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeNetworkInterfaces', params,
[('item', NetworkInterface)], verb='POST') | 0.001923 |
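A short usage sketch with a boto EC2 connection (the filter name follows the EC2 DescribeNetworkInterfaces filter syntax; the region and subnet ID are placeholders):

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
enis = conn.get_all_network_interfaces(filters={'subnet-id': 'subnet-12345678'})
for eni in enis:
    print(eni.id, eni.private_ip_address)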
def create_participant(worker_id, hit_id, assignment_id, mode):
"""Create a participant.
This route will be hit very early on as any nodes the participant creates
will be defined in reference to the participant object.
You must specify the worker_id, hit_id, assignment_id and mode in the url.
"""
# check this worker hasn't already taken part
parts = models.Participant.query.filter_by(worker_id=worker_id).all()
if parts:
print "participant already exists!"
return Response(status=200)
# make the participant
participant = models.Participant(worker_id=worker_id,
assignment_id=assignment_id,
hit_id=hit_id,
mode=mode)
session.add(participant)
session.commit()
# make a psiturk participant too, for now
from psiturk.models import Participant as PsiturkParticipant
psiturk_participant = PsiturkParticipant(workerid=worker_id,
assignmentid=assignment_id,
hitid=hit_id)
session_psiturk.add(psiturk_participant)
session_psiturk.commit()
# return the data
return success_response(field="participant",
data=participant.__json__(),
request_type="participant post") | 0.000709 |
def increase_writes_in_units(
current_provisioning, units, max_provisioned_writes,
consumed_write_units_percent, log_tag):
""" Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log
"""
units = int(units)
current_provisioning = float(current_provisioning)
consumed_write_units_percent = float(consumed_write_units_percent)
consumption_based_current_provisioning = \
int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
if consumption_based_current_provisioning > current_provisioning:
updated_provisioning = consumption_based_current_provisioning + units
else:
updated_provisioning = int(current_provisioning) + units
if max_provisioned_writes > 0:
if updated_provisioning > max_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes max limit: {1}'.format(
log_tag,
max_provisioned_writes))
return max_provisioned_writes
logger.debug(
'{0} - Write provisioning will be increased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning | 0.000585 |
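A worked example of the arithmetic above (values are illustrative): with 100 provisioned units, 150% consumption and a 50-unit increase, the consumption-based figure ceil(100 * 1.5) = 150 exceeds the current provisioning, so the candidate value is 150 + 50 = 200, which is then capped at the configured maximum:

print(increase_writes_in_units(
    current_provisioning=100, units=50, max_provisioned_writes=180,
    consumed_write_units_percent=150.0, log_tag='table:my-table'))
# -> 180 (200 would exceed the configured max of 180)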