text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns) | 0.006601 |
def search_device_by_id(self, deviceID) -> Device:
""" searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device
"""
for d in self.devices:
if d.id == deviceID:
return d
return None | 0.010256 |
def get_triplet_tuple(a: BaseEntity, b: BaseEntity, c: BaseEntity) -> Tuple[str, str, str, str, str, str]:
"""Get the triple as a tuple of BEL/hashes."""
return a.as_bel(), a.sha512, b.as_bel(), b.sha512, c.as_bel(), c.sha512 | 0.008584 |
def with_roles(obj=None, rw=None, call=None, read=None, write=None):
"""
Convenience function and decorator to define roles on an attribute. Only
works with :class:`RoleMixin`, which reads the annotations made by this
function and populates :attr:`~RoleMixin.__roles__`.
Examples::
id = db.Column(Integer, primary_key=True)
with_roles(id, read={'all'})
@with_roles(read={'all'})
@hybrid_property
def url_id(self):
return str(self.id)
When used with properties, with_roles must always be applied after the
property is fully described::
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
# Either of the following is fine, since with_roles annotates objects
# instead of wrapping them. The return value can be discarded if it's
# already present on the host object:
with_roles(title, read={'all'}, write={'owner', 'editor'})
title = with_roles(title, read={'all'}, write={'owner', 'editor'})
:param set rw: Roles which get read and write access to the decorated
attribute
:param set call: Roles which get call access to the decorated method
:param set read: Roles which get read access to the decorated attribute
:param set write: Roles which get write access to the decorated attribute
"""
# Convert lists and None values to sets
rw = set(rw) if rw else set()
call = set(call) if call else set()
read = set(read) if read else set()
write = set(write) if write else set()
# `rw` is shorthand for read+write
read.update(rw)
write.update(rw)
def inner(attr):
__cache__[attr] = {'call': call, 'read': read, 'write': write}
try:
attr._coaster_roles = {'call': call, 'read': read, 'write': write}
# If the attr has a restrictive __slots__, we'll get an attribute error.
# Unfortunately, because of the way SQLAlchemy works, by copying objects
# into subclasses, the cache alone is not a reliable mechanism. We need both.
except AttributeError:
pass
return attr
if is_collection(obj):
# Protect against accidental specification of roles instead of an object
raise TypeError('Roles must be specified as named parameters')
elif obj is not None:
return inner(obj)
else:
return inner | 0.00199 |
def func_load(code, defaults=None, closure=None, globs=None):
"""
Reload a function
:param code: The code object
:param defaults: Default values
:param closure: The closure
:param globs: globals
:return:
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
code = marshal.loads(code)
if closure is not None:
closure = func_reconstruct_closure(closure)
if globs is None:
globs = globals()
return types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure) | 0.003344 |
def parse_expression(expression: str) -> Tuple[Set[str], List[CompositeAxis]]:
"""
Parses an indexing expression (for a single tensor).
Checks uniqueness of names, checks usage of '...' (allowed only once)
Returns set of all used identifiers and a list of axis groups
"""
identifiers = set()
composite_axes = []
if '.' in expression:
if '...' not in expression:
raise EinopsError('Expression may contain dots only inside ellipsis (...)')
if str.count(expression, '...') != 1 or str.count(expression, '.') != 3:
raise EinopsError('Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor ')
expression = expression.replace('...', _ellipsis)
bracket_group = None
def add_axis_name(x):
if x is not None:
if x in identifiers:
raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(x))
identifiers.add(x)
if bracket_group is None:
composite_axes.append([x])
else:
bracket_group.append(x)
current_identifier = None
for char in expression:
if char in '() ' + _ellipsis:
add_axis_name(current_identifier)
current_identifier = None
if char == _ellipsis:
if bracket_group is not None:
raise EinopsError("Ellipsis can't be used inside the composite axis (inside brackets)")
composite_axes.append(_ellipsis)
identifiers.add(_ellipsis)
elif char == '(':
if bracket_group is not None:
raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)")
bracket_group = []
elif char == ')':
if bracket_group is None:
raise EinopsError('Brackets are not balanced')
composite_axes.append(bracket_group)
bracket_group = None
elif '0' <= char <= '9':
if current_identifier is None:
raise EinopsError("Axis name can't start with a digit")
current_identifier += char
elif 'a' <= char <= 'z':
if current_identifier is None:
current_identifier = char
else:
current_identifier += char
else:
if 'A' <= char <= 'Z':
raise EinopsError("Only lower-case latin letters allowed in names, not '{}'".format(char))
raise EinopsError("Unknown character '{}'".format(char))
if bracket_group is not None:
raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression))
add_axis_name(current_identifier)
return identifiers, composite_axes | 0.003176 |
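# Illustrative return format (a sketch; assumes the surrounding einops module supplies
# _ellipsis, EinopsError and the typing imports the function relies on):
identifiers, composite_axes = parse_expression('b c (h w)')
# identifiers == {'b', 'c', 'h', 'w'}
# composite_axes == [['b'], ['c'], ['h', 'w']]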
def relative_ref(self, baseURI):
"""
Return string containing relative reference to package item from
*baseURI*. E.g. PackURI('/ppt/slideLayouts/slideLayout1.xml') would
return '../slideLayouts/slideLayout1.xml' for baseURI '/ppt/slides'.
"""
# workaround for posixpath bug in 2.6, doesn't generate correct
# relative path when *start* (second) parameter is root ('/')
if baseURI == '/':
relpath = self[1:]
else:
relpath = posixpath.relpath(self, baseURI)
return relpath | 0.003484 |
def grouper(iterable: Iterable, size: int) -> Iterable:
"""
Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
:param iterable: A sequence of inputs.
:param size: Chunk size.
:return: Sequence of chunks.
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, size))
if not chunk:
return
yield chunk | 0.004608 |
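# Usage sketch (grouper relies on itertools, as imported in its module); the final,
# underfilled chunk is kept rather than padded or dropped:
chunks = list(grouper(range(7), 3))
# chunks == [[0, 1, 2], [3, 4, 5], [6]]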
def serializeable_exc_info(thetype, ex, tb):
"""
Since traceback objects can not be pickled, this function manipulates
exception info tuples before they are passed across process
boundaries.
"""
return thetype, ex, ''.join(traceback.format_exception(thetype, ex, tb)) | 0.003425 |
def fill_out(self, data, prefix='', turbo=False):
"""
Shortcut for filling out first ``<form>`` on page. See
:py:class:`~webdriverwrapper.forms.Form` for more information.
.. versionadded:: 2.0
"""
return self.get_elm(tag_name='form').fill_out(data, prefix, turbo) | 0.00639 |
def delete(self, using=None):
"""
Delete this entry.
"""
using = using or router.db_for_write(self.__class__, instance=self)
connection = connections[using]
logger.debug("Deleting LDAP entry %s" % self.dn)
connection.delete_s(self.dn)
signals.post_delete.send(sender=self.__class__, instance=self) | 0.00554 |
def create(self, handle=None, handle_type=None, **args):
"""
Creates an ontology based on a handle
Handle is one of the following
- `FILENAME.json` : creates an ontology from an obographs json file
- `obo:ONTID` : E.g. obo:pato - creates an ontology from obolibrary PURL (requires owltools)
- `ONTID` : E.g. 'pato' - creates an ontology from a remote SPARQL query
Arguments
---------
handle : str
specifies how to retrieve the ontology info
"""
if handle is None:
self.test = self.test+1
logging.info("T: "+str(self.test))
global default_ontology
if default_ontology is None:
logging.info("Creating new instance of default ontology")
default_ontology = create_ontology(default_ontology_handle)
logging.info("Using default_ontology")
return default_ontology
return create_ontology(handle, **args) | 0.00391 |
def offline_plotly_data(data, filename=None, config=None, validate=True,
default_width='100%', default_height=525, global_requirejs=False):
r""" Write a plotly scatter plot to HTML file that doesn't require server
>>> from nlpia.loaders import get_data
>>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv')
>>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns]
>>> data = {'data': [
... Scatter(x=df[continent+', x'],
... y=df[continent+', y'],
... text=df[continent+', text'],
... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,),
... mode='markers',
... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']
... ],
... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log'))
... }
>>> html = offline_plotly_data(data, filename=None)
"""
config_default = dict(DEFAULT_PLOTLY_CONFIG)
if config is not None:
config_default.update(config)
with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f:
js = f.read()
html, divid, width, height = _plot_html(
data,
config=config_default,
validate=validate,
default_width=default_width, default_height=default_height,
global_requirejs=global_requirejs)
html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html)
if filename and isinstance(filename, str):
with open(filename, 'wt') as f:
f.write(html)
return html | 0.003482 |
def convert_notebooks(self):
"""Convert IPython notebooks to Python scripts in editor"""
fnames = self.get_selected_filenames()
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
for fname in fnames:
self.convert_notebook(fname) | 0.006689 |
def add_edge(self, u_vertex, v_vertex):
"""Adds the edge ``u_vertex -> v_vertex`` to the graph if the edge is not already present.
:param u_vertex: Vertex
:param v_vertex: Vertex
:return: ``True`` if a new edge was added. ``False`` otherwise.
"""
self._vertices.add(u_vertex)
self._vertices.add(v_vertex)
if not self.is_edge(u_vertex, v_vertex):
self.indegrees[v_vertex] += 1
self.adj_dict[u_vertex].append(v_vertex)
return True
return False | 0.00531 |
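# Usage sketch, assuming `DirectedGraph` is a hypothetical concrete class exposing add_edge():
g = DirectedGraph()
assert g.add_edge('a', 'b') is True    # new edge; indegree of 'b' becomes 1
assert g.add_edge('a', 'b') is False   # duplicate edge is ignored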
def to_dict(self):
""" to_dict: puts Topic or Content node data into the format that Kolibri Studio expects
Args: None
Returns: dict of channel data
"""
return {
"title": self.title,
"language" : self.language,
"description": self.description,
"node_id": self.get_node_id().hex,
"content_id": self.get_content_id().hex,
"source_domain": self.domain_ns.hex,
"source_id": self.source_id,
"author": self.author,
"aggregator": self.aggregator,
"provider": self.provider,
"files" : [f.to_dict() for f in self.files if f and f.filename], # Filter out failed downloads
"tags": self.tags,
"kind": self.kind,
"license": None,
"license_description": None,
"copyright_holder": "",
"questions": [],
"extra_fields": {},
} | 0.007143 |
def nlmsg_alloc(len_=default_msg_size):
"""Allocate a new Netlink message with maximum payload size specified.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L299
Allocates a new Netlink message without any further payload. The maximum payload size defaults to
resource.getpagesize() or as otherwise specified with nlmsg_set_default_size().
Returns:
Newly allocated Netlink message (nl_msg class instance).
"""
len_ = max(libnl.linux_private.netlink.nlmsghdr.SIZEOF, len_)
nm = nl_msg()
nm.nm_refcnt = 1
nm.nm_nlh = libnl.linux_private.netlink.nlmsghdr(bytearray(b'\0') * len_)
nm.nm_protocol = -1
nm.nm_size = len_
nm.nm_nlh.nlmsg_len = nlmsg_total_size(0)
_LOGGER.debug('msg 0x%x: Allocated new message, maxlen=%d', id(nm), len_)
return nm | 0.003654 |
def deep_merge(dict_one, dict_two):
'''
Deep merge two dicts.
'''
merged = dict_one.copy()
for key, value in dict_two.items():
# value is equivalent to dict_two[key]
if (key in dict_one and
isinstance(dict_one[key], dict) and
isinstance(value, dict)):
merged[key] = deep_merge(dict_one[key], value)
elif (key in dict_one and
isinstance(dict_one[key], list) and
isinstance(value, list)):
merged[key] = list(set(dict_one[key] + value))
else:
merged[key] = value
return merged | 0.0016 |
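# Usage sketch: nested dicts are merged recursively, lists are combined as a set union
# (so their order is not guaranteed), and scalar values from dict_two win.
merged = deep_merge({'a': {'x': 1}, 'tags': ['red']},
                    {'a': {'y': 2}, 'tags': ['blue'], 'b': 3})
# merged['a'] == {'x': 1, 'y': 2}; merged['b'] == 3
# merged['tags'] contains both 'red' and 'blue' (order not guaranteed)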
def _apply_snap_off(self, queue=None):
r"""
Add all the throats to the queue with snap off pressure
This is probably wrong!!!! Each one needs to start a new cluster.
"""
net = self.project.network
phase = self.project.find_phase(self)
snap_off = self.settings['snap_off']
if queue is None:
queue = self.queue[0]
try:
Pc_snap_off = phase[snap_off]
logger.info("Adding snap off pressures to queue")
for T in net.throats():
if not np.isnan(Pc_snap_off[T]):
hq.heappush(queue, [Pc_snap_off[T], T, 'throat'])
except KeyError:
logger.warning("Phase " + phase.name + " doesn't have " +
"property " + snap_off) | 0.002484 |
def colorize_text(self, text):
"""Adds escape sequences to colorize text and make it
beautiful. To colorize text, prefix the text you want to color
with the color (capitalized) wrapped in double angle brackets
(i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you
don't, it will be done for you (assuming you used a color code
in your string)."""
# Take note of where the escape sequences are.
rnormal = text.rfind('<<NORMAL')
rany = text.rfind('<<')
# Put in the escape sequences.
for color, code in self.colors.items():
text = text.replace('<<%s>>' % color, code)
# Make sure that the last sequence is a NORMAL sequence.
if rany > -1 and rnormal < rany:
text += self.colors['NORMAL']
return text | 0.002378 |
def close(self):
"""Explicitly kill this cursor on the server. Call like (in Tornado):
.. code-block:: python
yield cursor.close()
"""
if not self.closed:
self.closed = True
yield self._framework.yieldable(self._close()) | 0.006897 |
def bmes_tags_to_spans(tag_sequence: List[str],
classes_to_ignore: List[str] = None) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BMES tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"),
as otherwise it is possible to get a perfect precision score whilst still predicting
ill-formed spans in addition to the correct spans.
This function works properly when the spans are unlabeled (i.e., your labels are
simply "B", "M", "E" and "S").
Parameters
----------
tag_sequence : List[str], required.
The integer class labels for a sequence.
classes_to_ignore : List[str], optional (default = None).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
Returns
-------
spans : List[TypedStringSpan]
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
def extract_bmes_tag_label(text):
bmes_tag = text[0]
label = text[2:]
return bmes_tag, label
spans: List[Tuple[str, List[int]]] = []
prev_bmes_tag: Optional[str] = None
for index, tag in enumerate(tag_sequence):
bmes_tag, label = extract_bmes_tag_label(tag)
if bmes_tag in ('B', 'S'):
# Regardless of tag, we start a new span when reaching B & S.
spans.append(
(label, [index, index])
)
elif bmes_tag in ('M', 'E') and prev_bmes_tag in ('B', 'M') and spans[-1][0] == label:
# Only expand the span if
# 1. Valid transition: B/M -> M/E.
# 2. Matched label.
spans[-1][1][1] = index
else:
# Best effort split for invalid span.
spans.append(
(label, [index, index])
)
# update previous BMES tag.
prev_bmes_tag = bmes_tag
classes_to_ignore = classes_to_ignore or []
return [
# to tuple.
(span[0], (span[1][0], span[1][1]))
for span in spans
if span[0] not in classes_to_ignore
] | 0.003353 |
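# Usage sketch (assumes the surrounding module's type imports): a well-formed B/E pair
# becomes one inclusive span, while an S tag yields a zero-length span.
spans = bmes_tags_to_spans(['B-PER', 'E-PER', 'S-LOC'])
# spans == [('PER', (0, 1)), ('LOC', (2, 2))]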
def _find_player_url(response):
"""
Finds embedded player url in HTTP response.
:param response: Response object.
:returns: Player url (str).
"""
url = ''
matches = _player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if 'hash' not in tmp_url:
# there's no hash in the URL, try to find it
matches = _hash_re.search(response.text)
if matches:
url = tmp_url + '&hash=' + matches.group(1)
else:
url = tmp_url
return 'http://ceskatelevize.cz/' + url | 0.001634 |
def get_vector(self):
"""Return the vector for this survey."""
vec = {}
for dim in ['forbidden', 'required', 'permitted']:
if self.survey[dim] is None:
continue
dim_vec = map(lambda x: (x['tag'], x['answer']),
self.survey[dim])
vec[dim] = dict(dim_vec)
return vec | 0.008021 |
def model_data(self):
"""str: The model location in S3. Only set if Estimator has been ``fit()``."""
if self.latest_training_job is not None:
model_uri = self.sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=self.latest_training_job.name)['ModelArtifacts']['S3ModelArtifacts']
else:
logging.warning('No finished training job found associated with this estimator. Please make sure '
'this estimator is only used for building workflow config')
model_uri = os.path.join(self.output_path, self._current_job_name, 'output', 'model.tar.gz')
return model_uri | 0.011628 |
def operations(*operations):
'''Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
- It transfers onto itself the docstring and annotations from the decorated
method, so as to be "transparent" with regard to introspection.
- It transforms the method so as to make it a classmethod.
- It invokes the method within a try-except condition, so as to
intercept and populate the Fail(<code>) conditions.'''
def decorator(method):
def wrapper(cls, request, start_response, **kwargs):
result_cache = []
try:
yield from method(cls, request, **kwargs)
except Respond as e:
# Inject messages as taken from signature
status = e.status
msg = utils.parse_return_annotation(method)[status]['message']
if status // 100 == 2: # All 2xx HTTP codes (integer division)
e.description = msg
raise e
else: # HTTP Errors --> use werkzeug exceptions
raise CODES_TO_EXCEPTIONS[status](msg)
# Add operation-specific attributes to the method.
method.swagger_ops = operations
method.signature = inspect.signature(method)
method.source = inspect.getsource(method)
method.path_vars = utils.extract_pathvars(method)
# "Backport" the method introspective attributes to the wrapper.
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
wrapper.__annotations__ = method.__annotations__
wrapper.swagger_ops = method.swagger_ops
wrapper.signature = method.signature
wrapper.source = method.source
wrapper.path_vars = method.path_vars
return classmethod(wrapper)
return decorator | 0.000537 |
def get_search(self):
"""Get current search or search from session, reset page if search is changed"""
old_search = self.get_session_value('search', '')
search = self.get_and_save_value('search', '')
if old_search != search:
self.page = 1
self.get_session_value('page', self.page)
return search | 0.00838 |
def standardize():
"""
return the variant standardization function
"""
def f(G, bim):
G_out = standardize_snps(G)
return G_out, bim
return f | 0.005618 |
def kappa_analysis_altman(kappa):
"""
Analyze a kappa value against the Altman benchmark.
:param kappa: kappa number
:type kappa : float
:return: strength of agreement as str
"""
try:
if kappa < 0.2:
return "Poor"
if kappa >= 0.20 and kappa < 0.4:
return "Fair"
if kappa >= 0.40 and kappa < 0.6:
return "Moderate"
if kappa >= 0.60 and kappa < 0.8:
return "Good"
if kappa >= 0.80 and kappa <= 1:
return "Very Good"
return "None"
except Exception: # pragma: no cover
return "None" | 0.00161 |
def get_snapshot_closest_to_state_change(
self,
state_change_identifier: int,
) -> Tuple[int, Any]:
""" Get snapshots earlier than state_change with provided ID. """
if not (state_change_identifier == 'latest' or isinstance(state_change_identifier, int)):
raise ValueError("from_identifier must be an integer or 'latest'")
cursor = self.conn.cursor()
if state_change_identifier == 'latest':
cursor.execute(
'SELECT identifier FROM state_changes ORDER BY identifier DESC LIMIT 1',
)
result = cursor.fetchone()
if result:
state_change_identifier = result[0]
else:
state_change_identifier = 0
cursor = self.conn.execute(
'SELECT statechange_id, data FROM state_snapshot '
'WHERE statechange_id <= ? '
'ORDER BY identifier DESC LIMIT 1',
(state_change_identifier, ),
)
rows = cursor.fetchall()
if rows:
assert len(rows) == 1, 'LIMIT 1 must return one element'
last_applied_state_change_id = rows[0][0]
snapshot_state = rows[0][1]
result = (last_applied_state_change_id, snapshot_state)
else:
result = (0, None)
return result | 0.003671 |
def _get_stripped_marker(marker, strip_func):
"""Build a new marker which is cleaned according to `strip_func`"""
if not marker:
return None
marker = _ensure_marker(marker)
elements = marker._markers
strip_func(elements)
if elements:
return marker
return None | 0.003289 |
def change_t(self, t):
'''
Change temperature.
Override.
Args:
t: Current temperature.
Returns:
Next temperature.
'''
t = super().change_t(t)
self.__now_cycles += 1
if self.__now_cycles % self.__reannealing_per == 0:
t = t * self.__thermostat
if t < self.__t_min:
t = self.__t_default
return t | 0.012605 |
def insert(self, data):
"""insert inserts one datapoint with the given data, and appends it to
the end of the stream::
s = cdb["mystream"]
s.create({"type": "string"})
s.insert("Hello World!")
"""
self.insert_array([{"d": data, "t": time.time()}], restamp=True) | 0.006006 |
def _prompt_started(self):
""" Called immediately after a new prompt is displayed.
"""
# Temporarily disable the maximum block count to permit undo/redo and
# to ensure that the prompt position does not change due to truncation.
self._control.document().setMaximumBlockCount(0)
self._control.setUndoRedoEnabled(True)
# Work around bug in QPlainTextEdit: input method is not re-enabled
# when read-only is disabled.
self._control.setReadOnly(False)
self._control.setAttribute(QtCore.Qt.WA_InputMethodEnabled, True)
if not self._reading:
self._executing = False
self._prompt_started_hook()
# If the input buffer has changed while executing, load it.
if self._input_buffer_pending:
self.input_buffer = self._input_buffer_pending
self._input_buffer_pending = ''
self._control.moveCursor(QtGui.QTextCursor.End) | 0.002073 |
def difference(self, other):
"""Return a new set which I{self} - I{other}, i.e. the items
in I{self} which are not also in I{other}.
@param other: the other set
@type other: Set object
@rtype: the same type as I{self}
"""
obj = self._clone()
obj.difference_update(other)
return obj | 0.005634 |
def absent(name, hostid=None, **kwargs):
'''
Ensures that the usermacro does not exist, deleting it if necessary.
:param name: name of the usermacro
:param hostid: id's of the hosts to apply the usermacro on, if missing a global usermacro is assumed.
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
.. code-block:: yaml
delete_usermacro:
zabbix_usermacro.absent:
- name: '{$SNMP_COMMUNITY}'
'''
connection_args = {}
if '_connection_user' in kwargs:
connection_args['_connection_user'] = kwargs['_connection_user']
if '_connection_password' in kwargs:
connection_args['_connection_password'] = kwargs['_connection_password']
if '_connection_url' in kwargs:
connection_args['_connection_url'] = kwargs['_connection_url']
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
# Comment and change messages
if hostid:
comment_usermacro_deleted = 'Usermacro {0} deleted from hostid {1}.'.format(name, hostid)
comment_usermacro_notdeleted = 'Unable to delete usermacro: {0} from hostid {1}.'.format(name, hostid)
comment_usermacro_notexists = 'Usermacro {0} does not exist on hostid {1}.'.format(name, hostid)
changes_usermacro_deleted = {name: {'old': 'Usermacro {0} exists on hostid {1}.'.format(name, hostid),
'new': 'Usermacro {0} deleted from {1}.'.format(name, hostid),
}
}
else:
comment_usermacro_deleted = 'Usermacro {0} deleted.'.format(name)
comment_usermacro_notdeleted = 'Unable to delete usermacro: {0}.'.format(name)
comment_usermacro_notexists = 'Usermacro {0} does not exist.'.format(name)
changes_usermacro_deleted = {name: {'old': 'Usermacro {0} exists.'.format(name),
'new': 'Usermacro {0} deleted.'.format(name),
}
}
if hostid:
usermacro_exists = __salt__['zabbix.usermacro_get'](name, hostids=hostid, **connection_args)
else:
usermacro_exists = __salt__['zabbix.usermacro_get'](name, globalmacro=True, **connection_args)
# Dry run, test=true mode
if __opts__['test']:
if not usermacro_exists:
ret['result'] = True
ret['comment'] = comment_usermacro_notexists
else:
ret['result'] = None
ret['comment'] = comment_usermacro_deleted
return ret
if not usermacro_exists:
ret['result'] = True
ret['comment'] = comment_usermacro_notexists
else:
try:
if hostid:
usermacroid = usermacro_exists[0]['hostmacroid']
usermacro_delete = __salt__['zabbix.usermacro_delete'](usermacroid, **connection_args)
else:
usermacroid = usermacro_exists[0]['globalmacroid']
usermacro_delete = __salt__['zabbix.usermacro_deleteglobal'](usermacroid, **connection_args)
except KeyError:
usermacro_delete = False
if usermacro_delete and 'error' not in usermacro_delete:
ret['result'] = True
ret['comment'] = comment_usermacro_deleted
ret['changes'] = changes_usermacro_deleted
else:
ret['result'] = False
ret['comment'] = comment_usermacro_notdeleted + six.text_type(usermacro_delete['error'])
return ret | 0.005415 |
def _realpath(fs, path, seen=pset()):
"""
.. warning::
The ``os.path`` module's realpath does not error or warn about
loops, but we do, following the behavior of GNU ``realpath(1)``!
"""
real = Path.root()
for segment in path.segments:
current = real / segment
seen = seen.add(current)
while True:
try:
current = fs.readlink(current)
except (exceptions.FileNotFound, exceptions.NotASymlink):
break
else:
current = current.relative_to(real)
if current in seen:
raise exceptions.SymbolicLoop(path)
current = fs.realpath(current, seen=seen)
real = current
return real | 0.001292 |
def _drop_oldest_chunk(self):
'''
Handles the case where the number of items coming into the chunk
exceeds the maximum capacity of the chunk. The intent is to remove
the oldest chunk so that new items can keep flowing in.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> min(s.chunked_counts.keys())
0
>>> s.chunked_counts
{0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}}
>>> data_stream = ['a','b','c','d','a','e','f']
>>> for item in data_stream:
... s.add(item)
>>> min(s.chunked_counts.keys())
2
>>> s.chunked_counts
{2: {'f': 1}}
'''
chunk_id = min(self.chunked_counts.keys())
chunk = self.chunked_counts.pop(chunk_id)
self.n_counts -= len(chunk)
for k, v in list(chunk.items()):
self.counts[k] -= v
self.counts_total -= v | 0.001978 |
def parse_package(self, p_term):
"""Parses package fields."""
# Check there is a package name
if not (p_term, self.spdx_namespace['name'], None) in self.graph:
self.error = True
self.logger.log('Package must have a name.')
# Create dummy package so that we may continue parsing the rest of
# the package fields.
self.builder.create_package(self.doc, 'dummy_package')
else:
for _s, _p, o in self.graph.triples((p_term, self.spdx_namespace['name'], None)):
try:
self.builder.create_package(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('Package name')
break
self.p_pkg_vinfo(p_term, self.spdx_namespace['versionInfo'])
self.p_pkg_fname(p_term, self.spdx_namespace['packageFileName'])
self.p_pkg_suppl(p_term, self.spdx_namespace['supplier'])
self.p_pkg_originator(p_term, self.spdx_namespace['originator'])
self.p_pkg_down_loc(p_term, self.spdx_namespace['downloadLocation'])
self.p_pkg_homepg(p_term, self.doap_namespace['homepage'])
self.p_pkg_chk_sum(p_term, self.spdx_namespace['checksum'])
self.p_pkg_src_info(p_term, self.spdx_namespace['sourceInfo'])
self.p_pkg_verif_code(p_term, self.spdx_namespace['packageVerificationCode'])
self.p_pkg_lic_conc(p_term, self.spdx_namespace['licenseConcluded'])
self.p_pkg_lic_decl(p_term, self.spdx_namespace['licenseDeclared'])
self.p_pkg_lics_info_from_files(p_term, self.spdx_namespace['licenseInfoFromFiles'])
self.p_pkg_comments_on_lics(p_term, self.spdx_namespace['licenseComments'])
self.p_pkg_cr_text(p_term, self.spdx_namespace['copyrightText'])
self.p_pkg_summary(p_term, self.spdx_namespace['summary'])
self.p_pkg_descr(p_term, self.spdx_namespace['description']) | 0.003041 |
def get_var(var):
'''
Get the value of a variable in make.conf
Return the value of the variable or None if the variable is not in
make.conf
CLI Example:
.. code-block:: bash
salt '*' makeconf.get_var 'LINGUAS'
'''
makeconf = _get_makeconf()
# Open makeconf
with salt.utils.files.fopen(makeconf) as fn_:
conf_file = salt.utils.data.decode(fn_.readlines())
for line in conf_file:
if line.startswith(var):
ret = line.split('=', 1)[1]
if '"' in ret:
ret = ret.split('"')[1]
elif '#' in ret:
ret = ret.split('#')[0]
ret = ret.strip()
return ret
return None | 0.001391 |
def persist(self, status=None):
"""
Enables persistent mode for the current mock.
Returns:
self: current Mock instance.
"""
self._persist = status if type(status) is bool else True | 0.008584 |
def get_version(root):
"""
Load and return the contents of version.json.
:param root: The root path that the ``version.json`` file will be opened
:type root: str
:returns: Content of ``version.json`` or None
:rtype: dict or None
"""
version_json = os.path.join(root, 'version.json')
if os.path.exists(version_json):
with open(version_json, 'r') as version_json_file:
return json.load(version_json_file)
return None | 0.002105 |
def list_packages(self, installed=True, notinstalled=True):
"""Return a list with all the installed/notinstalled packages"""
self.profile = Profile()
# Classify packages
installed_packages = []
notinstalled_packages = []
for package in self.packages:
data = {
'name': package,
'version': None,
'description': self.packages.get(package).get('description')
}
if package in self.profile.packages:
data['version'] = self.profile.get_package_version(
package, self.get_package_release_name(package))
installed_packages += [data]
else:
notinstalled_packages += [data]
# Print tables
terminal_width, _ = click.get_terminal_size()
if installed and installed_packages:
# - Print installed packages table
click.echo('\nInstalled packages:\n')
PACKAGELIST_TPL = ('{name:20} {description:30} {version:<8}')
click.echo('-' * terminal_width)
click.echo(PACKAGELIST_TPL.format(
name=click.style('Name', fg='cyan'), version='Version',
description='Description'))
click.echo('-' * terminal_width)
for package in installed_packages:
click.echo(PACKAGELIST_TPL.format(
name=click.style(package.get('name'), fg='cyan'),
version=package.get('version'),
description=package.get('description')))
if notinstalled and notinstalled_packages:
# - Print not installed packages table
click.echo('\nNot installed packages:\n')
PACKAGELIST_TPL = ('{name:20} {description:30}')
click.echo('-' * terminal_width)
click.echo(PACKAGELIST_TPL.format(
name=click.style('Name', fg='yellow'),
description='Description'))
click.echo('-' * terminal_width)
for package in notinstalled_packages:
click.echo(PACKAGELIST_TPL.format(
name=click.style(package.get('name'), fg='yellow'),
description=package.get('description'))) | 0.000869 |
def write(self, pkt):
"""
Writes a Packet or bytes to a pcap file.
:param pkt: Packet(s) to write (one record for each Packet), or raw
bytes to write (as one record).
:type pkt: iterable[Packet], Packet or bytes
"""
if isinstance(pkt, bytes):
if not self.header_present:
self._write_header(pkt)
self._write_packet(pkt)
else:
pkt = pkt.__iter__()
for p in pkt:
if not self.header_present:
self._write_header(p)
self._write_packet(p) | 0.003205 |
def create_mask(indexer, shape, chunks_hint=None):
"""Create a mask for indexing with a fill-value.
Parameters
----------
indexer : ExplicitIndexer
Indexer with -1 in integer or ndarray value to indicate locations in
the result that should be masked.
shape : tuple
Shape of the array being indexed.
chunks_hint : tuple, optional
Optional tuple indicating desired chunks for the result. If provided,
used as a hint for chunks on the resulting dask. Must have a hint for
each dimension on the result array.
Returns
-------
mask : bool, np.ndarray or dask.array.Array with dtype=bool
Dask array if chunks_hint is provided, otherwise a NumPy array. Has the
same shape as the indexing result.
"""
if isinstance(indexer, OuterIndexer):
key = _outer_to_vectorized_indexer(indexer, shape).tuple
assert not any(isinstance(k, slice) for k in key)
mask = _masked_result_drop_slice(key, chunks_hint)
elif isinstance(indexer, VectorizedIndexer):
key = indexer.tuple
base_mask = _masked_result_drop_slice(key, chunks_hint)
slice_shape = tuple(np.arange(*k.indices(size)).size
for k, size in zip(key, shape)
if isinstance(k, slice))
expanded_mask = base_mask[
(Ellipsis,) + (np.newaxis,) * len(slice_shape)]
mask = duck_array_ops.broadcast_to(
expanded_mask, base_mask.shape + slice_shape)
elif isinstance(indexer, BasicIndexer):
mask = any(k == -1 for k in indexer.tuple)
else:
raise TypeError('unexpected key type: {}'.format(type(indexer)))
return mask | 0.000578 |
def iter_ancestors(self):
"""
Iterates over the list of all ancestor nodes from
current node to the current tree root.
"""
node = self
while node.up is not None:
yield node.up
node = node.up | 0.015152 |
def from_taxdb(cls, con, root=None):
"""
Generate a TaxNode from a taxonomy database
"""
cursor = con.cursor()
if root is None:
cursor.execute(
"SELECT tax_id, rank FROM nodes WHERE tax_id = parent_id")
else:
cursor.execute(
"SELECT tax_id, rank FROM nodes WHERE tax_id = ?", [root])
tax_id, rank = cursor.fetchone()
root = cls(rank=rank, tax_id=tax_id)
def add_lineage(parent):
cursor.execute("""SELECT tax_id, rank, tax_name
FROM nodes INNER JOIN names USING (tax_id)
WHERE parent_id = :1 and tax_id <> :1
AND names.is_primary = 1
""", [parent.tax_id])
for tax_id, rank, name in cursor:
node = cls(rank=rank, tax_id=tax_id, name=name)
parent.add_child(node)
for child in parent.children:
add_lineage(child)
add_lineage(root)
return root | 0.001899 |
def getReadGroupSetByName(self, name):
"""
Returns a ReadGroupSet with the specified name, or raises a
ReadGroupSetNameNotFoundException if it does not exist.
"""
if name not in self._readGroupSetNameMap:
raise exceptions.ReadGroupSetNameNotFoundException(name)
return self._readGroupSetNameMap[name] | 0.005556 |
def calcphot(log, wavelengthArray, fluxArray, obsmode, extrapolate=False):
"""
*Run calcphot on a single spectrum and filter.*
**Key Arguments:**
- ``log`` -- logger
- ``wavelengthArray`` -- the array containing the wavelength range of the spectrum
- ``fluxArray`` -- the array containing the respective spectrum flux (as a function of wavelength)
- ``obsmode`` -- the observation mode (generally a filter system and filter type, e.g. "sdss,g")
- ``extrapolate`` -- extrapolate spectra in database to cover the requested band-pass. Default *False*.
**Return:**
- ``abMag`` -- the AB magnitude of the spectrum through the given band-pass
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import pysynphot as syn
## LOCAL APPLICATION ##
if extrapolate:
force = "extrapolate"
else:
force = None
################ > VARIABLE SETTINGS ######
# Read in a spectrum from a file
sp = syn.ArraySpectrum(
wave=wavelengthArray, flux=fluxArray, waveunits='angstrom', fluxunits='flam')
bp = syn.ObsBandpass(obsmode)
obs = syn.Observation(sp, bp, force=force)
abMag = obs.effstim('abmag')
return abMag | 0.00919 |
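# Usage sketch with a hypothetical flat spectrum (requires pysynphot and its reference data;
# the `log` argument is accepted but not used inside the function):
import numpy as np
wave = np.arange(4000.0, 7000.0, 1.0)   # angstrom
flux = np.full_like(wave, 1e-16)        # flam
abMag = calcphot(log=None, wavelengthArray=wave, fluxArray=flux, obsmode="sdss,g")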
def memcached(servers, key=None, from_cache=None, to_cache=None, time=0,
min_compress_len=0, debug=False):
"""memcached memoization function decorator.
The wrapped function is expected to return a value that is stored to a
memcached server, first translated by `to_cache` if provided. In the event
of a cache hit, the data is translated by `from_cache` if provided, before
being returned. If you do not want a result to be cached, wrap the return
value of your function in a `DoNotCache` object.
Example:
@memcached('127.0.0.1:11211')
def _listdir(path):
return os.path.listdir(path)
Note:
If using the default key function, ensure that repr() is implemented on
all your arguments and that they are hashable.
Note:
`from_cache` and `to_cache` both accept the value as first parameter,
then the target function's arguments follow.
Args:
servers (str or list of str): memcached server uri(s), eg '127.0.0.1:11211'.
This arg can be None also, in which case memcaching is disabled.
key (callable, optional): Function that, given the target function's args,
returns the string key to use in memcached.
from_cache (callable, optional): If provided, and a cache hit occurs, the
cached value will be translated by this function before being returned.
to_cache (callable, optional): If provided, and a cache miss occurs, the
function's return value will be translated by this function before
being cached.
time (int): Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
min_compress_len (int): The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yields a larger string than the input, then it is
discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
debug (bool): If True, memcache keys are kept human readable, so you can
read them if running a foreground memcached proc with 'memcached -vv'.
However this increases chances of key clashes so should not be left
turned on.
"""
def default_key(func, *nargs, **kwargs):
parts = [func.__module__]
argnames = getargspec(func).args
if argnames:
if argnames[0] == "cls":
cls_ = nargs[0]
parts.append(cls_.__name__)
nargs = nargs[1:]
elif argnames[0] == "self":
cls_ = nargs[0].__class__
parts.append(cls_.__name__)
nargs = nargs[1:]
parts.append(func.__name__)
value = ('.'.join(parts), nargs, tuple(sorted(kwargs.items())))
# make sure key is hashable. We don't strictly need it to be, but this
# is a way of hopefully avoiding object types that are not ordered (these
# would give an unreliable key). If you need to key on unhashable args,
# you should provide your own `key` functor.
_ = hash(value)
return repr(value)
def identity(value, *nargs, **kwargs):
return value
from_cache = from_cache or identity
to_cache = to_cache or identity
def decorator(func):
if servers:
def wrapper(*nargs, **kwargs):
with memcached_client(servers, debug=debug) as client:
if key:
cache_key = key(*nargs, **kwargs)
else:
cache_key = default_key(func, *nargs, **kwargs)
# get
result = client.get(cache_key)
if result is not client.miss:
return from_cache(result, *nargs, **kwargs)
# cache miss - run target function
result = func(*nargs, **kwargs)
if isinstance(result, DoNotCache):
return result.result
# store
cache_result = to_cache(result, *nargs, **kwargs)
client.set(key=cache_key,
val=cache_result,
time=time,
min_compress_len=min_compress_len)
return result
else:
def wrapper(*nargs, **kwargs):
result = func(*nargs, **kwargs)
if isinstance(result, DoNotCache):
return result.result
return result
def forget():
"""Forget entries in the cache.
Note that this does not delete entries from a memcached server - that
would be slow and error-prone. Calling this function only ensures
that entries set by the current process will no longer be seen during
this process.
"""
with memcached_client(servers, debug=debug) as client:
client.flush()
wrapper.forget = forget
wrapper.__wrapped__ = func
return update_wrapper(wrapper, func)
return decorator | 0.003329 |
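# Usage sketch of the cache-translation hooks (hypothetical load_settings function; assumes a
# memcached server on localhost): store the result as JSON text, decode it again on a cache hit.
import json

@memcached('127.0.0.1:11211',
           to_cache=lambda value, *nargs, **kwargs: json.dumps(value),
           from_cache=lambda cached, *nargs, **kwargs: json.loads(cached))
def load_settings(path):
    with open(path) as f:
        return json.load(f)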
def set_version(old_version, new_version):
"""
Write new version into VERSION_FILE
"""
try:
if APISettings.DEBUG:
Shell.debug('* ' + old_version + ' --> ' + new_version)
return True
for line in fileinput.input(os.path.abspath(APISettings.VERSION_FILE), inplace=True):
print(line.replace(old_version, new_version), end='')
Shell.success('* ' + old_version + ' --> ' + new_version)
except FileNotFoundError:
Shell.warn('File not found!') | 0.005245 |
def confirm_value(self, widget, x, y):
"""event called clicking on the gauge and so changing its value.
propagates the new value
"""
self.gauge.onmousedown(self.gauge, x, y)
params = (self.gauge.value)
return params | 0.007519 |
def create_oss_volume(cls, name, bucket, endpoint, access_key_id,
secret_access_key, access_mode, description=None,
prefix=None, properties=None, api=None):
"""
Create oss volume.
:param name: Volume name.
:param bucket: Referenced bucket.
:param access_key_id: Access key identifier.
:param secret_access_key: Secret access key.
:param access_mode: Access Mode.
:param endpoint: Volume Endpoint.
:param description: Volume description.
:param prefix: Volume prefix.
:param properties: Volume properties.
:param api: Api instance.
:return: Volume object.
"""
service = {
'type': VolumeType.OSS,
'bucket': bucket,
'endpoint': endpoint,
'credentials': {
'access_key_id': access_key_id,
'secret_access_key': secret_access_key
}
}
if prefix:
service['prefix'] = prefix
if properties:
service['properties'] = properties
data = {
'name': name,
'service': service,
'access_mode': access_mode
}
if description:
data['description'] = description
api = api or cls._API
extra = {
'resource': cls.__name__,
'query': data
}
logger.info('Creating oss volume', extra=extra)
response = api.post(url=cls._URL['query'], data=data).json()
return Volume(api=api, **response) | 0.002483 |
def order(self, last):
'''Perform ordering with respect model fields.'''
desc = last.desc
field = last.name
nested = last.nested
nested_args = []
while nested:
meta = nested.model._meta
nested_args.extend((self.backend.basekey(meta), nested.name))
last = nested
nested = nested.nested
method = 'ALPHA' if last.field.internal_type == 'text' else ''
if field == last.model._meta.pkname():
field = ''
return {'field': field,
'method': method,
'desc': desc,
'nested': nested_args} | 0.002972 |
def batch(self, batch_size, batch_num, fluxes=True):
"""Create a batch generator.
This is useful to generate n batches of m samples each.
Parameters
----------
batch_size : int
The number of samples contained in each batch (m).
batch_num : int
The number of batches in the generator (n).
fluxes : boolean
Whether to return fluxes or the internal solver variables. If set
to False will return a variable for each forward and backward flux
as well as all additional variables you might have defined in the
model.
Yields
------
pandas.DataFrame
A DataFrame with dimensions (batch_size x n_r) containing
a valid flux sample for a total of n_r reactions (or variables if
fluxes=False) in each row.
"""
for i in range(batch_num):
yield self.sample(batch_size, fluxes=fluxes) | 0.002022 |
def _memcache_key(self, timestamped=False):
"""Makes a key suitable as a memcache entry."""
request = tuple(map(str, self.package_requests))
repo_ids = []
for path in self.package_paths:
repo = package_repository_manager.get_repository(path)
repo_ids.append(repo.uid)
t = ["resolve",
request,
tuple(repo_ids),
self.package_filter_hash,
self.package_orderers_hash,
self.building,
config.prune_failed_graph]
if timestamped and self.timestamp:
t.append(self.timestamp)
return str(tuple(t)) | 0.003035 |
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
"""A generic function to load mnist-like dataset.
Parameters:
----------
shape : tuple
The shape of digit images.
path : str
The path that the data is downloaded to.
name : str
The dataset name you want to use (the default is 'mnist').
url : str
The URL of the dataset (the default is 'http://yann.lecun.com/exdb/mnist/').
"""
path = os.path.join(path, name)
# Define functions for loading mnist-like data's images and labels.
# For convenience, they also download the requested files if needed.
def load_mnist_images(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
logging.info(filepath)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(shape)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(path, filename):
filepath = maybe_download_and_extract(filename, path, url)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# Download and read the training and test set images and labels.
logging.info("Load or Download {0} > {1}".format(name.upper(), path))
X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_val, y_val, X_test, y_test | 0.000692 |
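# Usage sketch: shape follows (examples, channels, rows, columns) as described above;
# 'data' is a hypothetical download/cache directory.
X_train, y_train, X_val, y_val, X_test, y_test = _load_mnist_dataset(
    shape=(-1, 1, 28, 28), path='data')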
def draw_panel(self, data, panel_params, coord, ax, **params):
"""
Plot all groups
For efficiency, geoms that do not need to partition
different groups before plotting should override this
method and avoid the groupby.
Parameters
----------
data : dataframe
Data to be plotted by this geom. This is the
dataframe created in the plot_build pipeline.
panel_params : dict
The scale information as may be required by the
axes. At this point, that information is about
ranges, ticks and labels. Keys of interest to
the geom are::
'x_range' # tuple
'y_range' # tuple
coord : coord
Coordinate (e.g. coord_cartesian) system of the
geom.
ax : axes
Axes on which to plot.
params : dict
Combined parameters for the geom and stat. Also
includes the 'zorder'.
"""
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
self.draw_group(gdata, panel_params, coord, ax, **params) | 0.001672 |
def hot(self, limit=None):
"""GETs hot links from this subreddit. Calls :meth:`narwal.Reddit.hot`.
:param limit: max number of links to return
"""
return self._reddit.hot(self.display_name, limit=limit) | 0.016393 |
def select_directory(self):
"""Select directory"""
self.redirect_stdio.emit(False)
directory = getexistingdirectory(self.main, _("Select directory"),
getcwd_or_home())
if directory:
self.chdir(directory)
self.redirect_stdio.emit(True) | 0.005988 |
def underscore_to_camelcase(value, first_upper=True):
"""Transform string from underscore_string to camelCase.
:param value: string with underscores
:param first_upper: the result will have its first character in upper case
:type value: str
:return: string in CamelCase or camelCase according to the first_upper
:rtype: str
:Example:
>>> underscore_to_camelcase('camel_case')
'CamelCase'
>>> underscore_to_camelcase('camel_case', False)
'camelCase'
"""
value = str(value)
camelized = "".join(x.title() if x else '_' for x in value.split("_"))
if not first_upper:
camelized = camelized[0].lower() + camelized[1:]
return camelized | 0.001393 |
def _get_nblock_regions(in_file, min_n_size, ref_regions):
"""Retrieve coordinates of regions in reference genome with no mapping.
These are potential breakpoints for parallelizing analysis.
"""
out_lines = []
called_contigs = set([])
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
contig, start, end, ctype = line.rstrip().split()
called_contigs.add(contig)
if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and
int(end) - int(start) > min_n_size):
out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
for refr in ref_regions:
if refr.chrom not in called_contigs:
out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
return pybedtools.BedTool("\n".join(out_lines), from_string=True) | 0.003429 |
def do_inspect(self, arg):
"""
i(nspect) object
Inspect an object
"""
if arg in self.curframe.f_locals:
obj = self.curframe.f_locals[arg]
elif arg in self.curframe.f_globals:
obj = self.curframe.f_globals[arg]
else:
obj = WebPdb.null
if obj is not WebPdb.null:
self.console.writeline(
'{0} = {1}:\n'.format(arg, type(obj))
)
for name, value in inspect.getmembers(obj):
if not (name.startswith('__') and (name.endswith('__'))):
self.console.writeline(' {0}: {1}\n'.format(
name, self._get_repr(value, pretty=True, indent=8)
))
else:
self.console.writeline(
'NameError: name "{0}" is not defined\n'.format(arg)
)
self.console.flush() | 0.002169 |
def notify_about_new_variables(callback):
"""Calls `callback(var)` for all newly created variables.
Callback should not modify the variable passed in. Use cases that require
variables to be modified should use `variable_creator_scope` directly and sit
within the variable creator stack.
>>> variables = []
>>> with notify_about_new_variables(variables.append):
... v = tf.Variable(1.0, name='v')
... w = tf.get_variable('w', [])
>>> assert variables == [v, w]
Args:
callback: a callable taking a single argument which is a tf.Variable.
Yields:
`None` - used for contextmanager API.
"""
def _tracking_creator(getter, **kwargs):
v = getter(**kwargs)
callback(v)
return v
with tf.variable_creator_scope(_tracking_creator):
yield | 0.005115 |
def append(ol,ele,**kwargs):
'''
from elist.elist import *
ol = [1,2,3,4]
ele = 5
id(ol)
append(ol,ele,mode="original")
ol
id(ol)
####
ol = [1,2,3,4]
ele = 5
id(ol)
new = append(ol,ele)
new
id(new)
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
if(mode == "new"):
new = copy.deepcopy(ol)
new.append(ele)
return(new)
else:
ol.append(ele)
return(ol) | 0.005367 |
def all_experiments(self):
"""
Similar to experiments,
but uses the default manager to return archived experiments as well.
"""
from db.models.experiments import Experiment
return Experiment.all.filter(experiment_group=self) | 0.007326 |
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + ATTRIBUTE_IX_VALUE_TYPE]
if valueType == TYPE_STRING:
valueString = self.m_attributes[offset + ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return '' | 0.003263 |
def visit_subscript(self, node, parent):
"""visit a Subscript node by returning a fresh instance of it"""
context = self._get_context(node)
newnode = nodes.Subscript(
ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent
)
newnode.postinit(
self.visit(node.value, newnode), self.visit(node.slice, newnode)
)
return newnode | 0.007092 |
def moran_sexual(network):
"""The generalized sexual Moran process.
At each time step, an individual is chosen for replication and another
individual is chosen to die. The replica replaces the one who dies.
For this process to work you need to add a new agent before calling step.
"""
if not network.transmissions():
replacer = random.choice(network.nodes(type=Source))
replacer.transmit()
else:
from operator import attrgetter
agents = network.nodes(type=Agent)
baby = max(agents, key=attrgetter('creation_time'))
agents = [a for a in agents if a.id != baby.id]
replacer = random.choice(agents)
replaced = random.choice(
replacer.neighbors(direction="to", type=Agent))
# Give the baby the same outgoing connections as the replaced.
for node in replaced.neighbors(direction="to"):
baby.connect(direction="to", whom=node)
# Give the baby the same incoming connections as the replaced.
for node in replaced.neighbors(direction="from"):
node.connect(direction="to", whom=baby)
# Kill the replaced agent.
replaced.fail()
# Endow the baby with the ome of the replacer.
replacer.transmit(to_whom=baby) | 0.000772 |
def get_blob(profile, sha):
"""Fetch a blob.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the blob to fetch.
Returns:
A dict with data about the blob.
"""
resource = "/blobs/" + sha
data = api.get_request(profile, resource)
return prepare(data) | 0.001992 |
def read_calibration(
detx=None, det_id=None, from_file=False, det_id_table=None
):
"""Retrive calibration from file, the DB."""
from km3pipe.calib import Calibration # noqa
if not (detx or det_id or from_file):
return None
if detx is not None:
return Calibration(filename=detx)
if from_file:
det_ids = np.unique(det_id_table)
if len(det_ids) > 1:
log.critical("Multiple detector IDs found in events.")
det_id = det_ids[0]
if det_id is not None:
if det_id < 0:
log.warning(
"Negative detector ID found ({0}). This is a MC "
"detector and cannot be retrieved from the DB.".format(det_id)
)
return None
return Calibration(det_id=det_id)
return None | 0.001217 |
def seek(self, frames, whence=SEEK_SET):
"""Set the read/write position.
Parameters
----------
frames : int
The frame index or offset to seek.
whence : {SEEK_SET, SEEK_CUR, SEEK_END}, optional
By default (``whence=SEEK_SET``), `frames` are counted from
the beginning of the file.
``whence=SEEK_CUR`` seeks from the current position
(positive and negative values are allowed for `frames`).
``whence=SEEK_END`` seeks from the end (use negative value
for `frames`).
Returns
-------
int
The new absolute read/write position in frames.
Examples
--------
>>> from soundfile import SoundFile, SEEK_END
>>> myfile = SoundFile('stereo_file.wav')
Seek to the beginning of the file:
>>> myfile.seek(0)
0
Seek to the end of the file:
>>> myfile.seek(0, SEEK_END)
44100 # this is the file length
"""
self._check_if_closed()
position = _snd.sf_seek(self._file, frames, whence)
_error_check(self._errorcode)
return position | 0.001671 |
def find_author(self):
"""Get the author information from the version control system."""
return Author(name=self.context.capture('git', 'config', 'user.name', check=False, silent=True),
email=self.context.capture('git', 'config', 'user.email', check=False, silent=True)) | 0.012987 |
def read(self, size=None):
"""
Reads `size` bytes from the current directory entry. If `size` is omitted
or negative, it reads all remaining data up to the end of the entry.
"""
if self._is_mini:
self.seek(self._position)
else:
self.source.seek(self._source_position)
if not size or size < 0:
size = self.size - self.tell()
data = b('')
while len(data) < size:
if self.tell() > self.size:
break
if self._sector_number == ENDOFCHAIN:
break
to_read = size - len(data)
to_end = self.sector_size - self._position_in_sector
to_do = min(to_read, to_end)
data += self.stream.read(to_do)
self._position += to_do
self._source_position = self.source.tell()
if to_read >= to_end:
self._position_in_sector = 0
self._sector_number = self.next_sector(self._sector_number)
position = (self._sector_number + int(not self._is_mini)) \
<< self.sector_shift
self.stream.seek(position)
else:
self._position_in_sector += to_do
return data | 0.001594 |
def faz(input_file, variables=None):
"""
FAZ entry point.
"""
logging.debug("input file:\n {0}\n".format(input_file))
tasks = parse_input_file(input_file, variables=variables)
print("Found {0} tasks.".format(len(tasks)))
graph = DependencyGraph(tasks)
graph.show_tasks()
graph.execute() | 0.003106 |
def ckw01(handle, begtim, endtim, inst, ref, avflag, segid, nrec, sclkdp, quats,
avvs):
"""
Add a type 1 segment to a C-kernel.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw01_c.html
:param handle: Handle of an open CK file.
:type handle: int
:param begtim: The beginning encoded SCLK of the segment.
:type begtim: float
:param endtim: The ending encoded SCLK of the segment.
:type endtim: float
:param inst: The NAIF instrument ID code.
:type inst: int
:param ref: The reference frame of the segment.
:type ref: str
:param avflag: True if the segment will contain angular velocity.
:type avflag: bool
:param segid: Segment identifier.
:type segid: str
:param nrec: Number of pointing records.
:type nrec: int
:param sclkdp: Encoded SCLK times.
:type sclkdp: Array of floats
:param quats: Quaternions representing instrument pointing.
:type quats: Nx4-Element Array of floats
:param avvs: Angular velocity vectors.
:type avvs: Nx3-Element Array of floats
"""
handle = ctypes.c_int(handle)
begtim = ctypes.c_double(begtim)
endtim = ctypes.c_double(endtim)
inst = ctypes.c_int(inst)
ref = stypes.stringToCharP(ref)
avflag = ctypes.c_int(avflag)
segid = stypes.stringToCharP(segid)
sclkdp = stypes.toDoubleVector(sclkdp)
quats = stypes.toDoubleMatrix(quats)
avvs = stypes.toDoubleMatrix(avvs)
nrec = ctypes.c_int(nrec)
libspice.ckw01_c(handle, begtim, endtim, inst, ref, avflag, segid, nrec,
sclkdp, quats, avvs) | 0.001244 |
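A hedged sketch of how the wrapper above is typically driven end to end. It assumes SpiceyPy also exposes the companion ckopn/ckcls wrappers with their usual signatures; the file name, instrument ID, and segment name are made up.

import numpy as np
import spiceypy as spice  # assumption: ckopn/ckw01/ckcls are available as usual

handle = spice.ckopn("example.bc", "example CK", 0)   # open a new CK file
sclkdp = np.array([0.0, 1.0, 2.0])                    # encoded SCLK ticks
quats = np.tile([1.0, 0.0, 0.0, 0.0], (3, 1))         # identity pointing
avvs = np.zeros((3, 3))                               # zero angular velocity
spice.ckw01(handle, sclkdp[0], sclkdp[-1], -77701, "J2000", True,
            "EXAMPLE SEGMENT", len(sclkdp), sclkdp, quats, avvs)
spice.ckcls(handle)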
def active(self):
"""
Get a list of active projects.
:return list: A list of tuples containing a title and pageid in that order.
"""
projects = []
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers', 'cmpageid': self.active_id,
'cmtype': 'page', 'cmlimit': '500', 'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.append([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])
if 'query-continue' in jsd:
while True:
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmpageid': self.active_id, 'cmtype': 'page', 'cmlimit': '500',
'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.append([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])
if 'query-continue' not in jsd:
break
else:
break
return projects[0] | 0.006494 |
def register_report(self, reportid, r_title, r_cb, checker):
"""register a report
reportid is the unique identifier for the report
r_title the report's title
r_cb the method to call to make the report
checker is the checker defining the report
"""
reportid = reportid.upper()
self._reports[checker].append((reportid, r_title, r_cb)) | 0.005038 |
def inter(a, b):
"""
Intersect two sets of any data type to form a third set.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html
:param a: First input set.
:type a: spiceypy.utils.support_types.SpiceCell
:param b: Second input set.
:type b: spiceypy.utils.support_types.SpiceCell
:return: Intersection of a and b.
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(a, stypes.SpiceCell)
assert isinstance(b, stypes.SpiceCell)
assert a.dtype == b.dtype
# Next line was redundant with [raise NotImpImplementedError] below
# assert a.dtype == 0 or a.dtype == 1 or a.dtype == 2
    if a.dtype == 0:
        c = stypes.SPICECHAR_CELL(max(a.size, b.size), max(a.length, b.length))
    elif a.dtype == 1:
        c = stypes.SPICEDOUBLE_CELL(max(a.size, b.size))
    elif a.dtype == 2:
c = stypes.SPICEINT_CELL(max(a.size, b.size))
else:
raise NotImplementedError
libspice.inter_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
return c | 0.000946 |
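A small sketch of intersecting two SPICE integer cells with the wrapper above; it assumes insrti and the SPICEINT_CELL helper are importable as shown.

import spiceypy as spice
from spiceypy.utils import support_types as stypes  # assumed import path

a = stypes.SPICEINT_CELL(8)
b = stypes.SPICEINT_CELL(8)
for x in (1, 2, 3):
    spice.insrti(x, a)   # insert into set a
for x in (2, 3, 4):
    spice.insrti(x, b)   # insert into set b
c = spice.inter(a, b)    # c now holds {2, 3}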
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode']) | 0.002471 |
def get_plug_mro(self, plug_type):
"""Returns a list of names identifying the plug classes in the plug's MRO.
For example:
['openhtf.plugs.user_input.UserInput']
Or:
['openhtf.plugs.user_input.UserInput',
'my_module.advanced_user_input.AdvancedUserInput']
"""
ignored_classes = (BasePlug, FrontendAwareBasePlug)
return [
self.get_plug_name(base_class) for base_class in plug_type.mro()
if (issubclass(base_class, BasePlug) and
base_class not in ignored_classes)
] | 0.001835 |
def get(self, id, service='facebook', type='analysis'):
""" Get a given Pylon task
:param id: The ID of the task
:type id: str
:param service: The PYLON service (facebook)
:type service: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
"""
return self.request.get(service + '/task/' + type + '/' + id) | 0.003425 |
def is_transition_matrix(T, tol=1e-10):
"""
Tests whether T is a transition matrix
Parameters
----------
T : ndarray shape=(n, n)
matrix to test
tol : float
tolerance to check with
Returns
-------
Truth value : bool
True, if all elements are in interval [0, 1]
and each row of T sums up to 1.
False, otherwise
"""
if T.ndim != 2:
return False
if T.shape[0] != T.shape[1]:
return False
dim = T.shape[0]
X = np.abs(T) - T
x = np.sum(T, axis=1)
return np.abs(x - np.ones(dim)).max() < dim * tol and X.max() < 2.0 * tol | 0.00156 |
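A quick numerical check of the predicate above (numpy only; the matrix values are illustrative):

import numpy as np

T = np.array([[0.9, 0.1],
              [0.5, 0.5]])
print(is_transition_matrix(T))        # True: non-negative entries, rows sum to 1
print(is_transition_matrix(T - 0.2))  # False: negative entries, rows sum to 0.6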
def generateNodeDocuments(self):
'''
Creates all of the reStructuredText documents related to types parsed by
Doxygen. This includes all leaf-like documents (``class``, ``struct``,
``enum``, ``typedef``, ``union``, ``variable``, and ``define``), as well as
namespace, file, and directory pages.
During the reparenting phase of the parsing process, nested items were added as
a child to their actual parent. For classes, structs, enums, and unions, if
it was reparented to a ``namespace`` it will *remain* in its respective
``self.<breathe_kind>`` list. However, if it was an internally declared child
of a class or struct (nested classes, structs, enums, and unions), this node
will be removed from its ``self.<breathe_kind>`` list to avoid duplication in
the class hierarchy generation.
When generating the full API, though, we will want to include all of these and
therefore must call :func:`~exhale.graph.ExhaleRoot.generateSingleNodeRST` with
all of the nested items. For nested classes and structs, this is done by just
calling ``node.findNestedClassLike`` for every node in ``self.class_like``. The
resulting list then has all of ``self.class_like``, as well as any nested
classes and structs found. With ``enum`` and ``union``, these would have been
reparented to a **class** or **struct** if it was removed from the relevant
        ``self.<breathe_kind>`` list. This means we must make sure that we generate the
single node RST documents for everything by finding the nested enums and unions
from ``self.class_like``, as well as everything in ``self.enums`` and
``self.unions``.
'''
# initialize all of the nodes first
for node in self.all_nodes:
self.initializeNodeFilenameAndLink(node)
self.adjustFunctionTitles()
# now that all potential ``node.link_name`` members are initialized, generate
# the leaf-like documents
for node in self.all_nodes:
if node.kind in utils.LEAF_LIKE_KINDS:
self.generateSingleNodeRST(node)
# generate the remaining parent-like documents
self.generateNamespaceNodeDocuments()
self.generateFileNodeDocuments()
self.generateDirectoryNodeDocuments() | 0.00792 |
def lines(self, line_length=None):
"""
Generates a list with lines. These lines form the text drawing.
Args:
            line_length (int): Optional. Breaks the circuit drawing to this length. This is
                               useful when the drawing does not fit in the console. If
None (default), it will try to guess the console width using
shutil.get_terminal_size(). If you don't want pagination
at all, set line_length=-1.
Returns:
list: A list of lines with the text drawing.
"""
if line_length is None:
line_length = self.line_length
if line_length is None:
if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):
line_length = 80
else:
line_length, _ = get_terminal_size()
noqubits = len(self.qregs)
layers = self.build_layers()
if not line_length:
line_length = self.line_length
layer_groups = [[]]
rest_of_the_line = line_length
for layerno, layer in enumerate(layers):
# Replace the Nones with EmptyWire
layers[layerno] = EmptyWire.fillup_layer(layer, noqubits)
TextDrawing.normalize_width(layer)
if line_length == -1:
# Do not use pagination (aka line breaking. aka ignore line_length).
layer_groups[-1].append(layer)
continue
# chop the layer to the line_length (pager)
layer_length = layers[layerno][0].length
if layer_length < rest_of_the_line:
layer_groups[-1].append(layer)
rest_of_the_line -= layer_length
else:
layer_groups[-1].append(BreakWire.fillup_layer(len(layer), '»'))
# New group
layer_groups.append([BreakWire.fillup_layer(len(layer), '«')])
rest_of_the_line = line_length - layer_groups[-1][-1][0].length
layer_groups[-1].append(
InputWire.fillup_layer(self.wire_names(with_initial_value=False)))
rest_of_the_line -= layer_groups[-1][-1][0].length
layer_groups[-1].append(layer)
rest_of_the_line -= layer_groups[-1][-1][0].length
lines = []
for layer_group in layer_groups:
wires = [i for i in zip(*layer_group)]
lines += TextDrawing.draw_wires(wires, self.vertically_compressed)
return lines | 0.003448 |
def tree(config=None,
path=None,
with_tags=False,
saltenv='base'):
'''
Transform Cisco IOS style configuration to structured Python dictionary.
Depending on the value of the ``with_tags`` argument, this function may
provide different views, valuable in different situations.
config
The configuration sent as text. This argument is ignored when ``path``
is configured.
path
Absolute or remote path from where to load the configuration text. This
argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
        ``https://``, ``s3://``, ``ftp://``, etc.
with_tags: ``False``
Whether this function should return a detailed view, with tags.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' iosconfig.tree path=salt://path/to/my/config.txt
salt '*' iosconfig.tree path=https://bit.ly/2mAdq7z
'''
if path:
config = __salt__['cp.get_file_str'](path, saltenv=saltenv)
if config is False:
raise SaltException('{} is not available'.format(path))
config_lines = config.splitlines()
return _parse_text_config(config_lines, with_tags=with_tags) | 0.000716 |
def show_command(endpoint_id, rule_id):
"""
Executor for `globus endpoint permission show`
"""
client = get_client()
rule = client.get_endpoint_acl_rule(endpoint_id, rule_id)
formatted_print(
rule,
text_format=FORMAT_TEXT_RECORD,
fields=(
("Rule ID", "id"),
("Permissions", "permissions"),
("Shared With", _shared_with_keyfunc),
("Path", "path"),
),
) | 0.002174 |
def __parse_aliases_line(self, raw_alias, raw_username):
"""Parse aliases lines"""
alias = self.__encode(raw_alias)
username = self.__encode(raw_username)
return alias, username | 0.009479 |
def get_argument_starttime(self):
"""
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
"""
try:
starttime = self.get_argument(constants.PARAM_STARTTIME)
return starttime
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) | 0.011142 |
def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams | 0.027473 |
def is_valid_embedding(emb, source, target):
"""A simple (bool) diagnostic for minor embeddings.
See :func:`diagnose_embedding` for a more detailed diagnostic / more information.
Args:
emb (dict): a dictionary mapping source nodes to arrays of target nodes
source (graph or edgelist): the graph to be embedded
target (graph or edgelist): the graph being embedded into
Returns:
bool: True if `emb` is valid.
"""
for _ in diagnose_embedding(emb, source, target):
return False
return True | 0.003591 |
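A tiny worked example: embedding a triangle into a 4-cycle, with source node 2 represented by the chain [2, 3]. It assumes the companion diagnose_embedding generator is available in the same scope; the edge lists are all the helper needs.

# Triangle K3 embedded into the 4-cycle C4; chain {2, 3} covers source node 2.
source = [(0, 1), (1, 2), (0, 2)]
target = [(0, 1), (1, 2), (2, 3), (3, 0)]
emb = {0: [0], 1: [1], 2: [2, 3]}
print(is_valid_embedding(emb, source, target))  # True: chains connected, all edges covered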
def command_line_arguments(command_line_parameters):
"""Defines the command line parameters that are accepted."""
# create parser
parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# add parameters
# - the algorithm to execute
parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('gmm-voxforge',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
# - the database to choose
parser.add_argument('-d', '--database', choices = available_databases, default = 'voxforge', help = 'The database on which the baseline algorithm is executed.')
# - the database to choose
parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
# - the directory to write
parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
# - use the Idiap grid -- option is only useful if you are at Idiap
parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
# - run in parallel on the local machine
parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads')
# - perform ZT-normalization
  parser.add_argument('-z', '--zt-norm', action = 'store_false', help = 'Compute the ZT norm for the files (might not be available for all databases).')
# - just print?
parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
# - evaluate the algorithm (after it has finished)
parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
# TODO: add MIN-DCT measure
# - other parameters that are passed to the underlying script
parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.')
bob.core.log.add_command_line_option(parser)
args = parser.parse_args(command_line_parameters)
if args.all:
args.algorithms = all_algorithms
bob.core.log.set_verbosity_level(logger, args.verbose)
return args | 0.034104 |
def interpolate(self, transform, transitions=None, Y=None):
"""Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data`
"""
if transitions is None and Y is None:
# assume Y is self.data and use standard landmark transitions
transitions = self.transitions
return super().interpolate(transform, transitions=transitions, Y=Y) | 0.001832 |
async def reject(self, reason: str = '') -> None:
"""
Reject the request.
:param reason: reason to reject (only works in group request)
"""
try:
await self.bot.call_action(
action='.handle_quick_operation_async',
self_id=self.ctx.get('self_id'),
context=self.ctx,
operation={'approve': False, 'reason': reason}
)
except CQHttpError:
pass | 0.004115 |
def close (self):
"""Get results and close clamd daemon sockets."""
self.wsock.close()
data = self.sock.recv(self.sock_rcvbuf)
while data:
if "FOUND\n" in data:
self.infected.append(data)
if "ERROR\n" in data:
self.errors.append(data)
data = self.sock.recv(self.sock_rcvbuf)
self.sock.close() | 0.0075 |
def update_version_records(self):
"""
Update rash_info table if necessary.
"""
from .__init__ import __version__ as version
with self.connection(commit=True) as connection:
for vrec in self.get_version_records():
if (vrec.rash_version == version and
vrec.schema_version == schema_version):
return # no need to insert the new one!
connection.execute(
'INSERT INTO rash_info (rash_version, schema_version) '
'VALUES (?, ?)',
[version, schema_version]) | 0.004854 |
def _add_flow_v1_0(self, src, port, timeout, datapath):
"""enter a flow entry for the packet from the slave i/f
with idle_timeout. for OpenFlow ver1.0."""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(
in_port=port, dl_src=addrconv.mac.text_to_bin(src),
dl_type=ether.ETH_TYPE_SLOW)
actions = [parser.OFPActionOutput(
ofproto.OFPP_CONTROLLER, 65535)]
mod = parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=timeout,
priority=65535, flags=ofproto.OFPFF_SEND_FLOW_REM,
actions=actions)
datapath.send_msg(mod) | 0.002688 |
def n_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None, relabel=True):
"""Joins other to self at the N-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float
Psi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
omega: float
Omega torsion angle (degrees) between final `Residue` of
other and first `Residue` of self.
phi: float
Phi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
o_c_n_angle: float or None
Desired angle between O, C (final `Residue` of other) and N
(first `Residue` of self) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None
Desired angle between C (final `Residue` of other) and N, CA
(first `Residue` of self) atoms. If `None`, default value is taken
from `ideal_backbone_bond_angles`.
c_n_length: float or None
Desired peptide bond length between final `Residue` of other
and first `Residue` of self. If None, default value is taken
from ideal_backbone_bond_lengths.
relabel: bool
If True, relabel_all is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a `Polypeptide`
"""
if isinstance(other, Residue):
other = Polypeptide([other])
if not isinstance(other, Polypeptide):
raise TypeError(
'Only Polypeptide or Residue objects can be joined to a Polypeptide')
if abs(omega) >= 90:
peptide_conformation = 'trans'
else:
peptide_conformation = 'cis'
if o_c_n_angle is None:
o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
if c_n_ca_angle is None:
c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
if c_n_length is None:
c_n_length = ideal_backbone_bond_lengths['c_n']
r1 = self[0]
r1_n = r1['N']._vector
r1_ca = r1['CA']._vector
r1_c = r1['C']._vector
# p1 is point that will be used to position the C atom of r2.
p1 = r1_ca[:]
# rotate p1 by c_n_ca_angle, about axis perpendicular to the
# r1_n, r1_ca, r1_c plane, passing through r1_ca.
axis = numpy.cross((r1_ca - r1_n), (r1_c - r1_n))
q = Quaternion.angle_and_axis(angle=c_n_ca_angle, axis=axis)
p1 = q.rotate_vector(v=p1, point=r1_n)
# Ensure p1 is separated from r1_n by the correct distance.
p1 = r1_n + (c_n_length * unit_vector(p1 - r1_n))
# translate other so that its final C atom is at p1
other.translate(vector=(p1 - other[-1]['C']._vector))
# Force CA-C=O-N to be in a plane, and fix O=C-N angle accordingly
measured_dihedral = dihedral(
other[-1]['CA'], other[-1]['C'], other[-1]['O'], r1['N'])
desired_dihedral = 180.0
axis = other[-1]['O'] - other[-1]['C']
other.rotate(angle=(measured_dihedral - desired_dihedral),
axis=axis, point=other[-1]['C']._vector)
axis = (numpy.cross(other[-1]['O'] - other[-1]
['C'], r1['N'] - other[-1]['C']))
measured_o_c_n = angle_between_vectors(
other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C'])
other.rotate(angle=(measured_o_c_n - o_c_n_angle),
axis=axis, point=other[-1]['C']._vector)
# rotate other to obtain desired phi, omega, psi values at the join.
measured_phi = dihedral(other[-1]['C'], r1['N'], r1['CA'], r1['C'])
other.rotate(angle=(phi - measured_phi),
axis=(r1_n - r1_ca), point=r1_ca)
measured_omega = dihedral(
other[-1]['CA'], other[-1]['C'], r1['N'], r1['CA'])
other.rotate(angle=(measured_omega - omega),
axis=(r1['N'] - other[-1]['C']), point=r1_n)
measured_psi = dihedral(
other[-1]['N'], other[-1]['CA'], other[-1]['C'], r1['N'])
other.rotate(angle=-(measured_psi - psi), axis=(other[-1]['CA'] - other[-1]['C']),
point=other[-1]['CA']._vector)
self._monomers = other._monomers + self._monomers
if relabel:
self.relabel_all()
self.tags['assigned_ff'] = False
return | 0.001669 |
def meta_features_path(self, path):
"""Returns path for meta-features
Args:
path (str): Absolute/local path of xcessiv folder
"""
return os.path.join(
path,
app.config['XCESSIV_META_FEATURES_FOLDER'],
str(self.id)
) + '.npy' | 0.006079 |
def send_robust(self, **kwargs):
""" Trigger all receiver and pass them the parameters
If an exception is raised it will be catched and displayed as error
in the logger (if defined).
:param kwargs: all arguments from the event.
"""
for receiver in self.event_receivers:
try:
receiver(**kwargs)
except Exception as err: # pylint: disable=W0703
if not hasattr(err, '__traceback__'):
LOGGER.error(sys.exc_info()[2])
else:
LOGGER.error(getattr(err, '__traceback__')) | 0.003135 |
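A minimal, self-contained sketch of the robust-dispatch idea above, rewritten as a free function for illustration (not the library's actual event class): a failing receiver is logged and the remaining receivers still run.

import logging

LOGGER = logging.getLogger(__name__)

def send_robust(receivers, **kwargs):
    # Same contract as the method above: never let one receiver break the others.
    for receiver in receivers:
        try:
            receiver(**kwargs)
        except Exception as err:
            LOGGER.error("receiver %r failed: %s", receiver, err)

def ok(**kwargs):
    print("ok got", kwargs)

def bad(**kwargs):
    raise RuntimeError("boom")

send_robust([bad, ok], value=1)   # 'bad' is logged, 'ok' still runs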