text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def energy_at_conditions(self, pH, V):
"""
Get free energy for a given pH and V
Args:
pH (float): pH at which to evaluate free energy
V (float): voltage at which to evaluate free energy
Returns:
free energy at conditions
"""
return self.energy + self.npH * PREFAC * pH + self.nPhi * V | 0.005405 |
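A minimal standalone sketch of the free-energy formula above with hypothetical entry values; `PREFAC` is assumed to be the Nernst prefactor k_B·T·ln10 ≈ 0.0591 eV per pH unit at ~298 K (treat all numbers as placeholders):

```python
# Hedged sketch: G(pH, V) = G0 + npH * PREFAC * pH + nPhi * V
PREFAC = 0.0591  # assumed Nernst prefactor (eV per pH unit at ~298 K)

def energy_at_conditions(energy, npH, nPhi, pH, V):
    return energy + npH * PREFAC * pH + nPhi * V

print(energy_at_conditions(energy=-2.0, npH=-2, nPhi=-2, pH=7.0, V=0.5))
# -2.0 + (-2 * 0.0591 * 7.0) + (-2 * 0.5) = -3.8274
```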
def set_bin_window(self, bin_size=None, window_size=None):
"""Set the bin and window sizes."""
bin_size = bin_size or self.bin_size
window_size = window_size or self.window_size
assert 1e-6 < bin_size < 1e3
assert 1e-6 < window_size < 1e3
assert bin_size < window_size
self.bin_size = bin_size
self.window_size = window_size
# Set the status message.
b, w = self.bin_size * 1000, self.window_size * 1000
self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(b, w)) | 0.003584 |
def nmap(a, b, c, d, curvefn=None, normfn=None):
"""
Returns a function that maps a number n from range (a, b) onto a range
(c, d). If no curvefn is given, linear mapping will be used. Optionally a
normalisation function normfn can be provided to transform output.
"""
if not curvefn:
curvefn = lambda x: x
def map(n):
r = 1.0 * (n - a) / (b - a)
out = curvefn(r) * (d - c) + c
if not normfn:
return out
return normfn(out)
return map | 0.003861 |
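A quick usage sketch of `nmap` above (assumes the function is in scope): it maps 0–10 onto 0–100, once linearly and once through a square-root curve with rounding as the normalisation step.

```python
import math

f = nmap(0, 10, 0, 100)                                   # linear mapping
print(f(2.5))                                             # 25.0

g = nmap(0, 10, 0, 100, curvefn=math.sqrt, normfn=round)  # curved + rounded
print(g(2.5))                                             # sqrt(0.25) * 100 = 50.0 -> 50
```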
def get_activations(self):
"""Return a list of activations."""
res = (self.added, self.removed)
self.added = set()
self.removed = set()
return res | 0.010638 |
def write(self, offset, data):
"""
Write C{data} into this file at position C{offset}. Extending the
file past its original end is expected. Unlike python's normal
C{write()} methods, this method cannot do a partial write: it must
write all of C{data} or else return an error.
The default implementation checks for an attribute on C{self} named
C{writefile}, and if present, performs the write operation on the
python file-like object found there. The attribute is named
differently from C{readfile} to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
@param offset: position in the file to start writing at.
@type offset: int or long
@param data: data to write into the file.
@type data: str
@return: an SFTP error code like L{SFTP_OK}.
"""
writefile = getattr(self, 'writefile', None)
if writefile is None:
return SFTP_OP_UNSUPPORTED
try:
# in append mode, don't care about seeking
if (self.__flags & os.O_APPEND) == 0:
if self.__tell is None:
self.__tell = writefile.tell()
if offset != self.__tell:
writefile.seek(offset)
self.__tell = offset
writefile.write(data)
writefile.flush()
except IOError as e:
self.__tell = None
return SFTPServer.convert_errno(e.errno)
if self.__tell is not None:
self.__tell += len(data)
return SFTP_OK | 0.001763 |
def save(self, *args, **kwargs):
"""
Custom save method does the following things:
* converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point)
* intercepts changes to status and fires node_status_changed signal
* set default status
"""
# geometry collection check
if isinstance(self.geometry, GeometryCollection) and 0 < len(self.geometry) < 2:
self.geometry = self.geometry[0]
# if no status specified
if not self.status and not self.status_id:
try:
self.status = Status.objects.filter(is_default=True)[0]
except IndexError:
pass
super(Node, self).save(*args, **kwargs)
# if status of a node changes
if (self.status and self._current_status and self.status.id != self._current_status) or\
(self.status_id and self._current_status and self.status_id != self._current_status):
# send django signal
node_status_changed.send(
sender=self.__class__,
instance=self,
old_status=Status.objects.get(pk=self._current_status),
new_status=self.status
)
# update _current_status
self._current_status = self.status_id | 0.004409 |
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) | 0.005102 |
def simulated_annealing(problem, schedule=_exp_schedule, iterations_limit=0, viewer=None):
'''
Simulated annealing.
schedule is the scheduling function that decides the chance to choose worst
nodes depending on the time.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, and
SearchProblem.value.
'''
return _local_search(problem,
_create_simulated_annealing_expander(schedule),
iterations_limit=iterations_limit,
fringe_size=1,
stop_when_no_better=iterations_limit==0,
viewer=viewer) | 0.003628 |
def archive(self, repo_slug=None, format='zip', prefix=''):
""" Get one of your repositories and compress it as an archive.
Return a tuple of (success, archive path or error message).
format parameter is currently not supported.
"""
prefix = ('%s' % prefix).lstrip('/')
self._get_files_in_dir(repo_slug=repo_slug, dir='/')
if self.bitbucket.repo_tree:
with NamedTemporaryFile(delete=False) as archive:
with ZipFile(archive, 'w') as zip_archive:
for name, file in self.bitbucket.repo_tree.items():
with NamedTemporaryFile(delete=False) as temp_file:
temp_file.write(file.encode('utf-8'))
zip_archive.write(temp_file.name, prefix + name)
return (True, archive.name)
return (False, 'Could not archive your project.') | 0.002245 |
def refresh(self, index=None, params=None):
"""
Explicitly refresh one or more index, making all operations performed
since the last refresh available for search.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
"""
return self.transport.perform_request(
"POST", _make_path(index, "_refresh"), params=params
) | 0.002695 |
def remote_image_request(self, image_url, params=None):
"""
Send an image for classification. The image will be retrieved from the
URL specified. The params parameter is optional.
On success this method will immediately return job information. Its
status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as
it usually takes 6-12 seconds for the server to process an image. In
order to retrieve the annotation data, you need to keep updating the job
status using the :py:meth:`cloudsight.API.image_response` method until
the status changes. You may also use the :py:meth:`cloudsight.API.wait`
method which does this automatically.
:param image_url: Image URL.
:param params: Additional parameters for CloudSight API.
"""
data = self._init_data(params)
data['image_request[remote_image_url]'] = image_url
response = requests.post(REQUESTS_URL, headers={
'Authorization': self.auth.authorize('POST', REQUESTS_URL, data),
'User-Agent': USER_AGENT,
}, data=data)
return self._unwrap_error(response) | 0.003378 |
def resume(ctx):
"""
Resume work on the currently active issue.
The issue is retrieved from the currently active branch name.
"""
lancet = ctx.obj
username = lancet.tracker.whoami()
active_status = lancet.config.get("tracker", "active_status")
# Get the issue
issue = get_issue(lancet)
# Make sure the issue is in a correct status
transition = get_transition(ctx, lancet, issue, active_status)
# Make sure the issue is assigned to us
assign_issue(lancet, issue, username, active_status)
# Activate environment
set_issue_status(lancet, issue, active_status, transition)
with taskstatus("Resuming harvest timer") as ts:
lancet.timer.start(issue)
ts.ok("Resumed harvest timer") | 0.001314 |
def successors(self, *args, **kwargs):
"""
Perform execution using any applicable engine. Enumerate the current engines and use the
first one that works. Return a SimSuccessors object classifying the results of the run.
:param state: The state to analyze
:param addr: optional, an address to execute at instead of the state's ip
:param jumpkind: optional, the jumpkind of the previous exit
:param inline: This is an inline execution. Do not bother copying the state.
Additional keyword arguments will be passed directly into each engine's process method.
"""
return self.project.engines.successors(*args, **kwargs) | 0.009537 |
def _conf_changed(self, arg):
"""
internal callback. from control-spec:
4.1.18. Configuration changed
The syntax is:
StartReplyLine *(MidReplyLine) EndReplyLine
StartReplyLine = "650-CONF_CHANGED" CRLF
MidReplyLine = "650-" KEYWORD ["=" VALUE] CRLF
EndReplyLine = "650 OK"
Tor configuration options have changed (such as via a SETCONF or
RELOAD signal). KEYWORD and VALUE specify the configuration option
that was changed. Undefined configuration options contain only the
KEYWORD.
"""
conf = parse_keywords(arg, multiline_values=False)
for (k, v) in conf.items():
# v will be txtorcon.DEFAULT_VALUE already from
# parse_keywords if it was unspecified
real_name = self._find_real_name(k)
if real_name in self.parsers:
v = self.parsers[real_name].parse(v)
self.config[real_name] = v | 0.001976 |
def bottom_axis_label(self, label, position=None, rotation=0, offset=0.02,
**kwargs):
"""
Sets the label on the bottom axis.
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not position:
position = (0.5, -offset / 2., 0.5)
self._labels["bottom"] = (label, position, rotation, kwargs) | 0.004115 |
def propose_unif(self):
"""Propose a new live point by sampling *uniformly*
within the unit cube."""
u = self.unitcube.sample(rstate=self.rstate)
ax = np.identity(self.npdim)
return u, ax | 0.008734 |
def some(predicate, *seqs):
"""
>>> some(lambda x: x, [0, False, None])
False
>>> some(lambda x: x, [None, 0, 2, 3])
2
>>> some(operator.eq, [0,1,2], [2,1,0])
True
>>> some(operator.eq, [1,2], [2,1])
False
"""
try:
if len(seqs) == 1: return ifilter(bool,imap(predicate, seqs[0])).next()
else: return ifilter(bool,starmap(predicate, izip(*seqs))).next()
except StopIteration: return False | 0.017279 |
def write(self, data):
"""Write data to the GoogleCloudStorage file.
Args:
data: string containing the data to be written.
"""
start_time = time.time()
self._get_write_buffer().write(data)
ctx = context.get()
operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)
operation.counters.Increment(
COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx) | 0.00237 |
def resize(self, size, wait=True):
"""
Change the size of this droplet (must be powered off)
Parameters
----------
size: str
size slug, e.g., 512mb
wait: bool, default True
Whether to block until the pending action is completed
"""
return self._action('resize', size=size, wait=wait) | 0.005376 |
def fmap(self, f: Callable[[T], B]) -> 'List[B]':
"""doufo.List.fmap: map `List`
Args:
`self`:
`f` (`Callable[[T], B]`): any callable function
Returns:
return (`List[B]`): A `List` of objects produced by applying `f`.
Raises:
"""
return List([f(x) for x in self.unbox()]) | 0.022792 |
def ensure_unique_obs_ids_in_wide_data(obs_id_col, wide_data):
"""
Ensures that there is one observation per row in wide_data. Raises a
helpful ValueError if otherwise.
Parameters
----------
obs_id_col : str.
Denotes the column in `wide_data` that contains the observation ID
values for each row.
wide_data : pandas dataframe.
Contains one row for each observation. Should contain the specified
`obs_id_col` column.
Returns
-------
None.
"""
if len(wide_data[obs_id_col].unique()) != wide_data.shape[0]:
msg = "The values in wide_data[obs_id_col] are not unique, "
msg_2 = "but they need to be."
raise ValueError(msg + msg_2)
return None | 0.001337 |
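A small hedged usage sketch of the uniqueness check above with a hypothetical pandas frame (the function itself is assumed to be in scope):

```python
import pandas as pd

# Two rows share observation id 1, so the check should raise ValueError.
wide_data = pd.DataFrame({"obs_id": [1, 1, 2], "x": [0.1, 0.2, 0.3]})
try:
    ensure_unique_obs_ids_in_wide_data("obs_id", wide_data)
except ValueError as err:
    print(err)  # The values in wide_data[obs_id_col] are not unique, but they need to be.
```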
def ui_open(*files):
"""Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
"""
if files:
osname = get_os_name()
opener = _OPENER_BY_OS.get(osname)
if opener:
opener(files)
else:
raise OpenError('Open currently not supported for ' + osname) | 0.015789 |
def forward(self, inputs, token_types, valid_length=None): # pylint: disable=arguments-differ
"""Generate the unnormalized score for the given the input sequences.
Parameters
----------
inputs : NDArray, shape (batch_size, seq_length)
Input words for the sequences.
token_types : NDArray, shape (batch_size, seq_length)
Token types for the sequences, used to indicate whether the word belongs to the
first sentence or the second one.
valid_length : NDArray or None, shape (batch_size,)
Valid length of the sequence. This is used to mask the padded tokens.
Returns
-------
outputs : NDArray
Shape (batch_size, seq_length, 2)
"""
bert_output = self.bert(inputs, token_types, valid_length)
output = self.span_classifier(bert_output)
return output | 0.005495 |
def _dict_to_encryption_data(encryption_data_dict):
'''
Converts the specified dictionary to an EncryptionData object for
eventual use in decryption.
:param dict encryption_data_dict:
The dictionary containing the encryption data.
:return: an _EncryptionData object built from the dictionary.
:rtype: _EncryptionData
'''
try:
if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
except KeyError:
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
wrapped_content_key = encryption_data_dict['WrappedContentKey']
wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
_decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
wrapped_content_key['KeyId'])
encryption_agent = encryption_data_dict['EncryptionAgent']
encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
encryption_agent['Protocol'])
if 'KeyWrappingMetadata' in encryption_data_dict:
key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
else:
key_wrapping_metadata = None
encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
encryption_agent,
wrapped_content_key,
key_wrapping_metadata)
return encryption_data | 0.003639 |
def start(self):
"""Start the daemon."""
if self.worker is None:
raise DaemonError('No worker is defined for daemon')
if os.environ.get('DAEMONOCLE_RELOAD'):
# If this is actually a reload, we need to wait for the
# existing daemon to exit first
self._emit_message('Reloading {prog} ... '.format(prog=self.prog))
# Orphan this process so the parent can exit
self._orphan_this_process(wait_for_parent=True)
pid = self._read_pidfile()
if (pid is not None and
self._pid_is_alive(pid, timeout=self.stop_timeout)):
# The process didn't exit for some reason
self._emit_failed()
message = ('Previous process (PID {pid}) did NOT '
'exit during reload').format(pid=pid)
self._emit_error(message)
self._shutdown(message, 1)
# Check to see if the daemon is already running
pid = self._read_pidfile()
if pid is not None:
# I don't think this should be a fatal error
self._emit_warning('{prog} already running with PID {pid}'.format(
prog=self.prog, pid=pid))
return
if not self.detach and not os.environ.get('DAEMONOCLE_RELOAD'):
# This keeps the original parent process open so that we
# maintain control of the tty
self._fork_and_supervise_child()
if not os.environ.get('DAEMONOCLE_RELOAD'):
# A custom message is printed for reloading
self._emit_message('Starting {prog} ... '.format(prog=self.prog))
self._setup_environment()
if self.detach:
self._detach_process()
else:
self._emit_ok()
if self.pidfile is not None:
self._write_pidfile()
# Setup signal handlers
signal.signal(signal.SIGINT, self._handle_terminate)
signal.signal(signal.SIGQUIT, self._handle_terminate)
signal.signal(signal.SIGTERM, self._handle_terminate)
self._run() | 0.000932 |
def _size_fmt(num):
'''
Format bytes as human-readable file sizes
'''
try:
num = int(num)
if num < 1024:
return '{0} bytes'.format(num)
num /= 1024.0
for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
if num < 1024.0:
return '{0:3.1f} {1}'.format(num, unit)
num /= 1024.0
except Exception:
log.error('Unable to format file size for \'%s\'', num)
return 'unknown' | 0.002083 |
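The unit walk above, replayed standalone for one hypothetical value to show the expected output:

```python
num = 123456 / 1024.0                             # bytes -> KiB first
for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
    if num < 1024.0:
        print('{0:3.1f} {1}'.format(num, unit))   # 120.6 KiB
        break
    num /= 1024.0
```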
def dispatch(*funcs):
'''Iterates through the functions
and calls them with the given parameters
and returns the first non-None result
>>> f = dispatch(lambda: None, lambda: 1)
>>> f()
1
:param \*funcs: list of functions to dispatch to
:returns: dispatch function
'''
def _dispatch(*args, **kwargs):
for f in funcs:
result = f(*args, **kwargs)
if result is not None:
return result
return None
return _dispatch | 0.003876 |
def authenticate_by_password(cls, params):
""" Authenticate user with login and password from :params:
Used both by Token and Ticket-based auths (called from views).
"""
def verify_password(user, password):
return crypt.check(user.password, password)
success = False
user = None
login = params['login'].lower().strip()
key = 'email' if '@' in login else 'username'
try:
user = cls.get_item(**{key: login})
except Exception as ex:
log.error(str(ex))
if user:
password = params.get('password', None)
success = (password and verify_password(user, password))
return success, user | 0.002725 |
def vlm_add_input(self, psz_name, psz_input):
'''Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input)) | 0.008596 |
def getInstrParameter(self, value, header, keyword):
""" This method gets a instrument parameter from a
pair of task parameters: a value, and a header keyword.
The default behavior is:
- if the value and header keyword are given, raise an exception.
- if the value is given, use it.
- if the value is blank and the header keyword is given, use
the header keyword.
- if both are blank, or if the header keyword is not
found, return None.
"""
if isinstance(value, str) and value in ['None', '', ' ', 'INDEF']:
value = None
if value and (keyword is not None and keyword.strip() != ''):
exceptionMessage = "ERROR: Your input is ambiguous! Please specify either a value or a keyword.\n You specifed both " + str(value) + " and " + str(keyword)
raise ValueError(exceptionMessage)
elif value is not None and value != '':
return self._averageFromList(value)
elif keyword is not None and keyword.strip() != '':
return self._averageFromHeader(header, keyword)
else:
return None | 0.002471 |
def get_geno_marker(self, marker, return_index=False):
"""Gets the genotypes for a given marker.
Args:
marker (str): The name of the marker.
return_index (bool): Whether to return the marker's index or not.
Returns:
numpy.ndarray: The genotypes of the marker (additive format).
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Check if the marker exists
if marker not in self._bim.index:
raise ValueError("{}: marker not in BIM".format(marker))
# Seeking to the correct position
seek_index = self._bim.loc[marker, "i"]
self.seek(seek_index)
if return_index:
return self._read_current_marker(), seek_index
return self._read_current_marker() | 0.002361 |
def _frozensetload(l: Loader, value, type_) -> FrozenSet:
"""
This loads into something like FrozenSet[int]
"""
t = type_.__args__[0]
return frozenset(l.load(i, t) for i in value) | 0.01005 |
def merge(data,skip=50,fraction=1.0):
"""Merge one every 'skip' clouds into a single emcee population,
using the later 'fraction' of the run."""
w,s,d = data.chains.shape
start = int((1.0 - fraction) * s)
total = int((s - start) / skip)
return data.chains[:,start::skip,:].reshape((w*total,d)) | 0.03012 |
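A shape-only sketch of the slicing above with a hypothetical zero-filled chain array (NumPy assumed):

```python
import numpy as np

chains = np.zeros((32, 1000, 3))        # (walkers, steps, dims)
w, s, d = chains.shape
skip, fraction = 50, 1.0
start = int((1.0 - fraction) * s)       # 0: use the whole run
total = int((s - start) / skip)         # 20 clouds kept per walker
merged = chains[:, start::skip, :].reshape((w * total, d))
print(merged.shape)                     # (640, 3)
```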
def visit_set(self, node):
"""return an astroid.Set node as string"""
return "{%s}" % ", ".join(child.accept(self) for child in node.elts) | 0.012987 |
def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num) | 0.004577 |
def _set_member_entry(self, v, load=False):
"""
Setter method for member_entry, mapped from YANG variable /rbridge_id/secpolicy/defined_policy/policies/member_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_entry() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("member",member_entry.member_entry, yang_name="member-entry", rest_name="member-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='member', extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="member-entry", rest_name="member-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """member_entry must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("member",member_entry.member_entry, yang_name="member-entry", rest_name="member-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='member', extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="member-entry", rest_name="member-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='list', is_config=True)""",
})
self.__member_entry = t
if hasattr(self, '_set'):
self._set() | 0.004064 |
def close(self):
"""Close the file. Can be called multiple times."""
if not self.closed:
# be sure to flush data to disk before closing the file
self.flush()
err = _snd.sf_close(self._file)
self._file = None
_error_check(err) | 0.006623 |
def add(modname, features, required_version, installed_version=None,
optional=False):
"""Add Spyder dependency"""
global DEPENDENCIES
for dependency in DEPENDENCIES:
if dependency.modname == modname:
raise ValueError("Dependency has already been registered: %s"\
% modname)
DEPENDENCIES += [Dependency(modname, features, required_version,
installed_version, optional)] | 0.004149 |
def clean_value(self, val):
"""
:param dict val: {"content": "", "name": "", "ext": "", "type": ""}
:return: the key of the stored file
"""
if isinstance(val, dict):
if self.random_name:
val['random_name'] = self.random_name
if 'file_name' in val.keys():
val['name'] = val.pop('file_name')
val['content'] = val.pop('file_content')
return self.file_manager().store_file(**val)
# If val is not an instance of dict, it is returned as-is because it is already
# the key of the file
try:
return str(val)
except ValueError:
raise ValidationError("%r could not be cast to string" % val) | 0.004038 |
def failed_update(self, exception):
"""Update cluster state given a failed MetadataRequest."""
f = None
with self._lock:
if self._future:
f = self._future
self._future = None
if f:
f.failure(exception)
self._last_refresh_ms = time.time() * 1000 | 0.005882 |
def psv2pl(point, span1, span2):
"""
Make a CSPICE plane from a point and two spanning vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/psv2pl_c.html
:param point: A Point.
:type point: 3-Element Array of floats
:param span1: First Spanning vector.
:type span1: 3-Element Array of floats
:param span2: Second Spanning vector.
:type span2: 3-Element Array of floats
:return: A SPICE plane.
:rtype: stypes.Plane
"""
point = stypes.toDoubleVector(point)
span1 = stypes.toDoubleVector(span1)
span2 = stypes.toDoubleVector(span2)
plane = stypes.Plane()
libspice.psv2pl_c(point, span1, span2, ctypes.byref(plane))
return plane | 0.001399 |
def window_lanczos(N):
r"""Lanczos window also known as sinc window.
:param N: window length
.. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'lanczos')
.. seealso:: :func:`create_window`, :class:`Window`
"""
if N ==1:
return ones(1)
n = linspace(-N/2., N/2., N)
win = sinc(2*n/(N-1.))
return win | 0.004184 |
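The same formula written with plain NumPy (`numpy.sinc` is the normalised sinc), as a hedged sanity check that the window is symmetric and peaks near 1:

```python
import numpy as np

N = 64
n = np.linspace(-N / 2.0, N / 2.0, N)
win = np.sinc(2 * n / (N - 1.0))
assert np.allclose(win, win[::-1])   # the Lanczos window is symmetric
print(win.max())                     # ~1.0 near the centre of the window
```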
def load_model_from_path(model_path, meta=False, **overrides):
"""Load a model from a data directory path. Creates Language class with
pipeline from meta.json and then calls from_disk() with path."""
from .tokenizer_loader import TokenizerLoader
if not meta:
meta = get_model_meta(model_path)
tokenizer_loader = TokenizerLoader(meta=meta, **overrides)
tokenizers = meta.get('tokenizers', [])
disable = overrides.get('disable', [])
if tokenizers is True:
tokenizers = TokenizerLoader.Defaults.tokenizers
elif tokenizers in (False, None):
tokenizers = []
for tokenizer_name in tokenizers:
if tokenizer_name not in disable:
config = meta.get('tokenizer_args', {}).get(tokenizer_name, {})
component = tokenizer_loader.create_tokenizer(tokenizer_name, config=config)
tokenizer_loader.add_tokenizer(component, name=tokenizer_name)
return tokenizer_loader.from_disk(model_path) | 0.002035 |
def is_tile_strictly_isolated(hand_34, tile_34):
"""
Tile is strictly isolated if it doesn't have -2, -1, 0, +1, +2 neighbors
:param hand_34: array of tiles in 34 tile format
:param tile_34: int
:return: bool
"""
hand_34 = copy.copy(hand_34)
# we don't need to count target tile in the hand
hand_34[tile_34] -= 1
if hand_34[tile_34] < 0:
hand_34[tile_34] = 0
indices = []
if is_honor(tile_34):
return hand_34[tile_34] == 0
else:
simplified = simplify(tile_34)
# 1 suit tile
if simplified == 0:
indices = [tile_34, tile_34 + 1, tile_34 + 2]
# 2 suit tile
elif simplified == 1:
indices = [tile_34 - 1, tile_34, tile_34 + 1, tile_34 + 2]
# 8 suit tile
elif simplified == 7:
indices = [tile_34 - 2, tile_34 - 1, tile_34, tile_34 + 1]
# 9 suit tile
elif simplified == 8:
indices = [tile_34 - 2, tile_34 - 1, tile_34]
# 3-7 suit tiles
else:
indices = [tile_34 - 2, tile_34 - 1, tile_34, tile_34 + 1, tile_34 + 2]
return all([hand_34[x] == 0 for x in indices]) | 0.001699 |
def _recv_flow(self, method_frame):
'''
Receive a flow control command from the broker
'''
self.channel._active = method_frame.args.read_bit()
args = Writer()
args.write_bit(self.channel.active)
self.send_frame(MethodFrame(self.channel_id, 20, 21, args))
if self._flow_control_cb is not None:
self._flow_control_cb() | 0.005076 |
def build_sample_paths(sample):
"""
Ensure existence of folders for a Sample.
:param looper.models.Sample sample: Sample (or instance supporting get()
that stores folders paths in a 'paths' key, in which the value is a
mapping from path name to actual folder path)
"""
for path_name, path in sample.paths.items():
print("{}: '{}'".format(path_name, path))
base, ext = os.path.splitext(path)
if ext:
print("Skipping file-like: '[}'".format(path))
elif not os.path.isdir(base):
os.makedirs(base) | 0.001706 |
def remove_root_bank(self, bank_id):
"""Removes a root bank from this hierarchy.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` not a parent of ``child_id``
raise: NullArgument - ``bank_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=bank_id)
return self._hierarchy_session.remove_root(id_=bank_id) | 0.003788 |
def render(self, display):
"""Render basicly the text."""
# to handle changing objects / callable
if self.shawn_text != self._last_text:
self._render()
if self.text:
papy = self._surface.get_width()
if papy <= self.width:
display.blit(self._surface, (self.topleft, self.size))
else:
display.blit(self._surface, (self.topleft, self.size), ((papy - self.w, 0), self.size))
else:
display.blit(self.default_text, (self.topleft, self.size))
if self._focus:
groom = self.cursor_pos()
line(display, (groom, self.top), (groom, self.bottom), CONCRETE) | 0.004231 |
def load_config_vars(target_config, source_config):
"""Loads all attributes from source config into target config
@type target_config: TestRunConfigManager
@param target_config: Config to dump variables into
@type source_config: TestRunConfigManager
@param source_config: The other config
@return: True
"""
# Overwrite all attributes in config with new config
for attr in dir(source_config):
# skip all private class attrs
if attr.startswith('_'):
continue
val = getattr(source_config, attr)
if val is not None:
setattr(target_config, attr, val) | 0.001567 |
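A minimal sketch of the attribute copy above, using `types.SimpleNamespace` objects as hypothetical stand-ins for TestRunConfigManager (the function is assumed to be in scope):

```python
from types import SimpleNamespace

target = SimpleNamespace(host="localhost", retries=3)
source = SimpleNamespace(host="example.org", retries=None)
load_config_vars(target, source)
print(target.host, target.retries)   # example.org 3  (None values are skipped)
```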
def build_ref_list(refs):
"""
Given parsed references build a list of ref objects
"""
ref_list = []
for reference in refs:
ref = ea.Citation()
# Publication Type
utils.set_attr_if_value(ref, 'publication_type', reference.get('publication-type'))
# id
utils.set_attr_if_value(ref, 'id', reference.get('id'))
# Article title
utils.set_attr_if_value(ref, 'article_title', reference.get('full_article_title'))
# Source
utils.set_attr_if_value(ref, 'source', reference.get('source'))
# Volume
utils.set_attr_if_value(ref, 'volume', reference.get('volume'))
# Issue
utils.set_attr_if_value(ref, 'issue', reference.get('issue'))
# First page
utils.set_attr_if_value(ref, 'fpage', reference.get('fpage'))
# Last page
utils.set_attr_if_value(ref, 'lpage', reference.get('lpage'))
# DOI
utils.set_attr_if_value(ref, 'doi', reference.get('doi'))
# Year
utils.set_attr_if_value(ref, 'year', reference.get('year'))
# Year date in iso 8601 format
utils.set_attr_if_value(ref, 'year_iso_8601_date', reference.get('year-iso-8601-date'))
# Can set the year_numeric now
if ref.year_iso_8601_date is not None:
# First preference take it from the iso 8601 date, if available
try:
ref.year_numeric = int(ref.year_iso_8601_date.split('-')[0])
except ValueError:
ref.year_numeric = None
if ref.year_numeric is None:
# Second preference, use the year value if it is entirely numeric
if utils.is_year_numeric(ref.year):
ref.year_numeric = ref.year
# date-in-citation
utils.set_attr_if_value(ref, 'date_in_citation', reference.get('date-in-citation'))
# elocation-id
utils.set_attr_if_value(ref, 'elocation_id', reference.get('elocation-id'))
# uri
utils.set_attr_if_value(ref, 'uri', reference.get('uri'))
if not ref.uri:
# take uri value from uri_text
utils.set_attr_if_value(ref, 'uri', reference.get('uri_text'))
# pmid
utils.set_attr_if_value(ref, 'pmid', reference.get('pmid'))
# isbn
utils.set_attr_if_value(ref, 'isbn', reference.get('isbn'))
# accession
utils.set_attr_if_value(ref, 'accession', reference.get('accession'))
# patent
utils.set_attr_if_value(ref, 'patent', reference.get('patent'))
# patent country
utils.set_attr_if_value(ref, 'country', reference.get('country'))
# publisher-loc
utils.set_attr_if_value(ref, 'publisher_loc', reference.get('publisher_loc'))
# publisher-name
utils.set_attr_if_value(ref, 'publisher_name', reference.get('publisher_name'))
# edition
utils.set_attr_if_value(ref, 'edition', reference.get('edition'))
# version
utils.set_attr_if_value(ref, 'version', reference.get('version'))
# chapter-title
utils.set_attr_if_value(ref, 'chapter_title', reference.get('chapter-title'))
# comment
utils.set_attr_if_value(ref, 'comment', reference.get('comment'))
# data-title
utils.set_attr_if_value(ref, 'data_title', reference.get('data-title'))
# conf-name
utils.set_attr_if_value(ref, 'conf_name', reference.get('conf-name'))
# Authors
if reference.get('authors'):
for author in reference.get('authors'):
ref_author = {}
eautils.set_if_value(ref_author, 'group-type', author.get('group-type'))
eautils.set_if_value(ref_author, 'surname', author.get('surname'))
eautils.set_if_value(ref_author, 'given-names', author.get('given-names'))
eautils.set_if_value(ref_author, 'collab', author.get('collab'))
if ref_author:
ref.add_author(ref_author)
# Try to populate the doi attribute if the uri is a doi
if not ref.doi and ref.uri:
if ref.uri != eautils.doi_uri_to_doi(ref.uri):
ref.doi = eautils.doi_uri_to_doi(ref.uri)
# Append the reference to the list
ref_list.append(ref)
return ref_list | 0.002999 |
def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, batch_size=0, max_iterations=0, verbose=True,
**kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
model : string, optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
classes : list, optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will be automatically
be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
# Make predictions on the training set and as column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size))
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of a MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps,
cuda_mem_req=cuda_mem_req)
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
# If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of "
"dictionaries or single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
iteration_base1 = iteration + 1
if progress['smoothed_loss'] is None:
progress['smoothed_loss'] = cur_loss
else:
progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
cur_time = _time.time()
# Printing of table header is deferred, so that start-of-training
# warnings appear above the table
if verbose and iteration == 0:
# Print progress table header
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
if verbose and (cur_time > progress['last_time'] + 10 or
iteration_base1 == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter=iteration_base1, loss=progress['smoothed_loss'],
time=elapsed_time , width=column_width-1))
progress['last_time'] = cur_time
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy()
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
# Once a batch is loaded into NumPy, pass it immediately to the
# numpy_worker so that we can start I/O and decoding for the next
# batch.
for batch in loader:
mxnet_batch_queue.put(batch)
mxnet_batch_queue.put(None)
def numpy_worker():
while True:
batch = mxnet_batch_queue.get()
if batch is None:
break
for x, y in zip(batch.data, batch.label):
# Convert to NumPy arrays with required shapes. Note that
# asnumpy waits for any pending MXNet operations to finish.
input_data = _mxnet_to_mps(x.asnumpy())
label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
# Convert to packed 32-bit arrays.
input_data = input_data.astype(_np.float32)
if not input_data.flags.c_contiguous:
input_data = input_data.copy()
label_data = label_data.astype(_np.float32)
if not label_data.flags.c_contiguous:
label_data = label_data.copy()
# Push this batch to the main thread.
numpy_batch_queue.put({'input' : input_data,
'label' : label_data,
'iteration' : batch.iteration})
# Tell the main thread there's no more data.
numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
pending_loss = batch_queue.pop(0)
batch_loss = pending_loss.asnumpy() # Waits for the batch to finish
return batch_loss.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
# Submit this batch to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch()
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k])
else: # Use MxNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state) | 0.001721 |
def async_process(funcs, *args, executor=False, sol=None, callback=None, **kw):
"""
Execute `func(*args)` in an asynchronous parallel process.
:param funcs:
Functions to be executed.
:type funcs: list[callable]
:param args:
Arguments to be passed to first function call.
:type args: tuple
:param executor:
Pool executor to run the function.
:type executor: str | bool
:param sol:
Parent solution.
:type sol: schedula.utils.sol.Solution
:param callback:
Callback function to be called after all function execution.
:type callback: callable
:param kw:
Keywords to be passed to first function call.
:type kw: dict
:return:
Functions result.
:rtype: object
"""
name = _executor_name(executor, sol.dsp)
e = _get_executor(name)
res = (e and e.process_funcs or _process_funcs)(
name, funcs, executor, *args, **kw
)
for r in res:
callback and callback('sol' in r, r.get('sol', r.get('res')))
if 'err' in r:
raise r['err']
return res[-1]['res'] | 0.000887 |
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
"""orderStatus(EWrapper self, OrderId orderId, IBString const & status, int filled, int remaining, double avgFillPrice, int permId, int parentId, double lastFillPrice, int clientId, IBString const & whyHeld)"""
return _swigibpy.EWrapper_orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld) | 0.01006 |
def distance(x,y):
"""[summary]
HELPER-FUNCTION
calculates the (eulidean) distance between vector x and y.
Arguments:
x {[tuple]} -- [vector]
y {[tuple]} -- [vector]
"""
assert len(x) == len(y), "The vectors must have the same length"
result = ()
sum = 0
for i in range(len(x)):
result += (x[i] -y[i],)
for component in result:
sum += component**2
return math.sqrt(sum) | 0.006803 |
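A quick cross-check of the helper above against the standard library (`math.dist`, Python 3.8+) on the 3-4-5 triangle:

```python
import math

x, y = (0.0, 0.0), (3.0, 4.0)
print(math.dist(x, y))   # 5.0 -- should match distance(x, y) above
```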
def _process_worker(call_queue, result_queue, initializer, initargs,
processes_management_lock, timeout, worker_exit_lock,
current_depth):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A ctx.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A ctx.Queue of _ResultItems that will written
to by the worker.
initializer: A callable initializer, or None
initargs: A tuple of args for the initializer
processes_management_lock: A ctx.Lock avoiding worker timeout while some
workers are being spawned.
timeout: maximum time to wait for a new item in the call_queue. If that
time is expired, the worker will shutdown.
worker_exit_lock: Lock to avoid flagging the executor as broken on
workers timeout.
current_depth: Nested parallelism level, to avoid infinite spawning.
"""
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
# The parent will notice that the process stopped and
# mark the pool broken
return
# set the global _CURRENT_DEPTH mechanism to limit recursive call
global _CURRENT_DEPTH
_CURRENT_DEPTH = current_depth
_process_reference_size = None
_last_memory_leak_check = None
pid = os.getpid()
mp.util.debug('Worker started with timeout=%s' % timeout)
while True:
try:
call_item = call_queue.get(block=True, timeout=timeout)
if call_item is None:
mp.util.info("Shutting down worker on sentinel")
except queue.Empty:
mp.util.info("Shutting down worker after timeout %0.3fs"
% timeout)
if processes_management_lock.acquire(block=False):
processes_management_lock.release()
call_item = None
else:
mp.util.info("Could not acquire processes_management_lock")
continue
except BaseException as e:
previous_tb = traceback.format_exc()
try:
result_queue.put(_RemoteTraceback(previous_tb))
except BaseException:
# If we cannot format correctly the exception, at least print
# the traceback.
print(previous_tb)
sys.exit(1)
if call_item is None:
# Notify queue management thread about clean worker shutdown
result_queue.put(pid)
with worker_exit_lock:
return
try:
r = call_item()
except BaseException as e:
exc = _ExceptionWithTraceback(e)
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
else:
_sendback_result(result_queue, call_item.work_id, result=r)
del r
# Free the resource as soon as possible, to avoid holding onto
# open files or shared memory that is not needed anymore
del call_item
if _USE_PSUTIL:
if _process_reference_size is None:
# Make reference measurement after the first call
_process_reference_size = _get_memory_usage(pid, force_gc=True)
_last_memory_leak_check = time()
continue
if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
mem_usage = _get_memory_usage(pid)
_last_memory_leak_check = time()
if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
# Memory usage stays within bounds: everything is fine.
continue
# Check again memory usage; this time take the measurement
# after a forced garbage collection to break any reference
# cycles.
mem_usage = _get_memory_usage(pid, force_gc=True)
_last_memory_leak_check = time()
if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
# The GC managed to free the memory: everything is fine.
continue
# The process is leaking memory: let the master process
# know that we need to start a new worker.
mp.util.info("Memory leak detected: shutting down worker")
result_queue.put(pid)
with worker_exit_lock:
return
else:
# if psutil is not installed, trigger gc.collect events
# regularly to limit potential memory leaks due to reference cycles
if ((_last_memory_leak_check is None) or
(time() - _last_memory_leak_check >
_MEMORY_LEAK_CHECK_DELAY)):
gc.collect()
_last_memory_leak_check = time() | 0.000195 |
def get_all_rml(self, **kwargs):
"""
Returns a dictionary with the output of all the RML processor results
"""
rml_procs = self.es_defs.get("kds_esRmlProcessor", [])
role = kwargs.get('role')
if role:
rml_procs = [proc for proc in rml_procs
if role == 'rdf_class' or
proc['force']]
rml_maps = {}
for rml in rml_procs:
rml_maps[rml['name']] = self.get_rml(rml, **kwargs)
return rml_maps | 0.003774 |
def mapFromChart( self, x, y ):
"""
Maps a chart point to a pixel position within the grid based on the
rulers.
:param x | <variant>
y | <variant>
:return <QPointF>
"""
grid = self.gridRect()
hruler = self.horizontalRuler()
vruler = self.verticalRuler()
xperc = hruler.percentAt(x)
yperc = vruler.percentAt(y)
xoffset = grid.width() * xperc
yoffset = grid.height() * yperc
xpos = grid.left() + xoffset
ypos = grid.bottom() - yoffset
return QPointF(xpos, ypos) | 0.026171 |
def get_host(self):
"""
Gets the host name or IP address.
:return: the host name or IP address.
"""
host = self.get_as_nullable_string("host")
        host = host if host is not None else self.get_as_nullable_string("ip")
return host | 0.010791 |
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
        # Only process if stats exist, are not empty (issue #871) and the plugin is not disabled
if not self.stats or (self.stats == []) or self.is_disable():
return ret
# Check if all GPU have the same name
same_name = all(s['name'] == self.stats[0]['name'] for s in self.stats)
# gpu_stats contain the first GPU in the list
gpu_stats = self.stats[0]
# Header
header = ''
if len(self.stats) > 1:
header += '{} '.format(len(self.stats))
if same_name:
header += '{} {}'.format('GPU', gpu_stats['name'])
else:
header += '{}'.format('GPU')
msg = header[:17]
ret.append(self.curse_add_line(msg, "TITLE"))
# Build the string message
if len(self.stats) == 1 or args.meangpu:
# GPU stat summary or mono GPU
# New line
ret.append(self.curse_new_line())
# GPU PROC
try:
mean_proc = sum(s['proc'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_proc_msg = '{:>4}'.format('N/A')
else:
mean_proc_msg = '{:>3.0f}%'.format(mean_proc)
if len(self.stats) > 1:
msg = '{:13}'.format('proc mean:')
else:
msg = '{:13}'.format('proc:')
ret.append(self.curse_add_line(msg))
ret.append(self.curse_add_line(
mean_proc_msg, self.get_views(item=gpu_stats[self.get_key()],
key='proc',
option='decoration')))
# New line
ret.append(self.curse_new_line())
# GPU MEM
try:
mean_mem = sum(s['mem'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_mem_msg = '{:>4}'.format('N/A')
else:
mean_mem_msg = '{:>3.0f}%'.format(mean_mem)
if len(self.stats) > 1:
msg = '{:13}'.format('mem mean:')
else:
msg = '{:13}'.format('mem:')
ret.append(self.curse_add_line(msg))
ret.append(self.curse_add_line(
mean_mem_msg, self.get_views(item=gpu_stats[self.get_key()],
key='mem',
option='decoration')))
else:
# Multi GPU
for gpu_stats in self.stats:
# New line
ret.append(self.curse_new_line())
# GPU ID + PROC + MEM
id_msg = '{}'.format(gpu_stats['gpu_id'])
try:
proc_msg = '{:>3.0f}%'.format(gpu_stats['proc'])
except ValueError:
proc_msg = '{:>4}'.format('N/A')
try:
mem_msg = '{:>3.0f}%'.format(gpu_stats['mem'])
except ValueError:
mem_msg = '{:>4}'.format('N/A')
msg = '{}: {} mem: {}'.format(id_msg, proc_msg, mem_msg)
ret.append(self.curse_add_line(msg))
return ret | 0.001467 |
def is_valid_continuous_partition_object(partition_object):
"""Tests whether a given object is a valid continuous partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
if (partition_object is None) or ("weights" not in partition_object) or ("bins" not in partition_object):
return False
if("tail_weights" in partition_object):
if (len(partition_object["tail_weights"])!=2):
return False
comb_weights=partition_object["tail_weights"]+partition_object["weights"]
else:
comb_weights=partition_object["weights"]
# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one
if (len(partition_object['bins']) == (len(partition_object['weights']) + 1)) and \
np.all(np.diff(partition_object['bins']) > 0) and \
np.allclose(np.sum(comb_weights), 1):
return True
return False | 0.008163 |
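For illustration, hypothetical partition objects that exercise the checks above (values are made up; numpy is assumed to be imported as np, as the function body requires):

good = {"bins": [0.0, 1.0, 2.0, 3.0], "weights": [0.2, 0.5, 0.3]}
bad = {"bins": [0.0, 1.0, 1.0], "weights": [0.5, 0.5]}  # edges not strictly increasing
with_tails = {"bins": [0.0, 1.0, 2.0], "weights": [0.4, 0.4], "tail_weights": [0.1, 0.1]}
assert is_valid_continuous_partition_object(good)
assert not is_valid_continuous_partition_object(bad)
assert is_valid_continuous_partition_object(with_tails)  # tail weights count toward the sum of 1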
def _update_value(config, key, instruction, is_sensitive):
"""
creates (if needed) and updates the value of the key in the config with a
value entered by the user
Parameters
----------
config: ConfigParser object
existing configuration
key: string
key to update
instruction: string
text to show in the prompt
is_sensitive: bool
if true, require confirmation and do not show typed characters
Notes
-----
sets key in config passed in
"""
if config.has_option(PROFILE, key):
current_value = config.get(PROFILE, key)
else:
current_value = None
proposed = click.prompt(
instruction,
default=current_value,
hide_input=is_sensitive,
confirmation_prompt=is_sensitive,
)
if key == 'host' or key == 'prod_folder':
if proposed[-1] == '/':
proposed = proposed[:-1]
if key == 'host':
        if not proposed.startswith('http'):
proposed = click.prompt(
("looks like there's an issue - "
'make sure the host name starts with http'),
default=current_value,
hide_input=is_sensitive,
confirmation_prompt=is_sensitive,
)
config.set(PROFILE, key, proposed) | 0.000758 |
def thumbnails_for_file(relative_source_path, root=None, basedir=None,
subdir=None, prefix=None):
"""
Return a list of dictionaries, one for each thumbnail belonging to the
source image.
The following list explains each key of the dictionary:
`filename` -- absolute thumbnail path
`x` and `y` -- the size of the thumbnail
`options` -- list of options for this thumbnail
`quality` -- quality setting for this thumbnail
"""
if root is None:
root = settings.MEDIA_ROOT
if prefix is None:
prefix = settings.THUMBNAIL_PREFIX
if subdir is None:
subdir = settings.THUMBNAIL_SUBDIR
if basedir is None:
basedir = settings.THUMBNAIL_BASEDIR
source_dir, filename = os.path.split(relative_source_path)
thumbs_path = os.path.join(root, basedir, source_dir, subdir)
if not os.path.isdir(thumbs_path):
return []
files = all_thumbnails(thumbs_path, recursive=False, prefix=prefix,
subdir='')
return files.get(filename, []) | 0.000923 |
def buildNavigation(self):
"""
Chooses the appropriate layout navigation component based on user prefs
"""
if self.buildSpec['navigation'] == constants.TABBED:
navigation = Tabbar(self, self.buildSpec, self.configs)
else:
navigation = Sidebar(self, self.buildSpec, self.configs)
if self.buildSpec['navigation'] == constants.HIDDEN:
navigation.Hide()
return navigation | 0.004193 |
def _connect(self):
"""Connect to socket. This should be run in a new thread."""
while self.protocol:
_LOGGER.info('Trying to connect to %s', self.server_address)
try:
sock = socket.create_connection(
self.server_address, self.reconnect_timeout)
except socket.timeout:
_LOGGER.error(
'Connecting to socket timed out for %s',
self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
except OSError:
_LOGGER.error(
'Failed to connect to socket at %s', self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
else:
self.tcp_check_timer = time.time()
self.tcp_disconnect_timer = time.time()
transport = TCPTransport(
sock, lambda: self.protocol, self._check_connection)
poll_thread = threading.Thread(target=self._poll_queue)
self._stop_event.clear()
poll_thread.start()
transport.start()
transport.connect()
return | 0.001335 |
def parent(self, index):
"""
Reimplements the :meth:`QAbstractItemModel.parent` method.
:param index: Index.
:type index: QModelIndex
:return: Parent.
:rtype: QModelIndex
"""
if not index.isValid():
return QModelIndex()
node = self.get_node(index)
parent_node = node.parent
if not parent_node:
return QModelIndex()
if parent_node == self.__root_node:
return QModelIndex()
row = parent_node.row()
return self.createIndex(row, 0, parent_node) if row is not None else QModelIndex() | 0.004754 |
def add_keyrepeat_callback(self, key, fn):
"""
Allows for custom callback functions for the viewer. Called on key repeat.
Parameter 'any' will ensure that the callback is called on any key repeat,
and block default mujoco viewer callbacks from executing, except for
the ESC callback to close the viewer.
"""
self.viewer.keyrepeat[key].append(fn) | 0.012438 |
def is_discover(n):
"""Checks if credit card number fits the discover card format."""
n, length = str(n), len(str(n))
if length == 16:
if n[0] == '6':
if ''.join(n[1:4]) == '011' or n[1] == '5':
return True
elif n[1] == '4' and n[2] in strings_between(4, 10):
return True
elif ''.join(n[1:6]) in strings_between(22126, 22926):
return True
return False | 0.002174 |
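A hypothetical usage sketch (assumption: the module-level strings_between(a, b) helper returns the string forms of the integers in [a, b), so strings_between(4, 10) is ['4', ..., '9']):

assert is_discover(6011000000000004)      # 16 digits starting with 6011
assert is_discover(6500000000000002)      # 16 digits starting with 65
assert not is_discover(4111111111111111)  # Visa-style prefix, not Discover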
def ui_main(fmt_table, node_dict):
"""Create the base UI in command mode."""
cmd_funct = {"quit": False,
"run": node_cmd,
"stop": node_cmd,
"connect": node_cmd,
"details": node_cmd,
"update": True}
ui_print("\033[?25l") # cursor off
print("{}\n".format(fmt_table))
sys.stdout.flush()
# refresh_main values:
# None = loop main-cmd, True = refresh-list, False = exit-program
refresh_main = None
while refresh_main is None:
cmd_name = get_user_cmd(node_dict)
if callable(cmd_funct[cmd_name]):
refresh_main = cmd_funct[cmd_name](cmd_name, node_dict)
else:
refresh_main = cmd_funct[cmd_name]
if cmd_name != "connect" and refresh_main:
ui_clear(len(node_dict) + 2)
return refresh_main | 0.001155 |
def edit_rrset(self, zone_name, rtype, owner_name, ttl, rdata, profile=None):
"""Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
profile -- The profile info if this is updating a resource pool
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
if profile:
rrset["profile"] = profile
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
return self.rest_api_connection.put(uri, json.dumps(rrset)) | 0.005755 |
def disable_all_tokens(platform, user_id, on_error=None, on_success=None):
""" Disable ALL device tokens for the given user on the specified platform.
    :param str platform: The platform on which to disable tokens. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters.
"""
__device_token(platform, False, user_id, all=True, on_error=on_error, on_success=on_success) | 0.005348 |
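A hedged usage sketch (assumptions: this function lives in the outbound module and outbound.APNS is the Apple platform constant named in the docstring):

import outbound

def log_failure(code, error):
    print("disable failed:", code, error)

def log_success():
    print("all APNS tokens disabled")

outbound.disable_all_tokens(outbound.APNS, "user-42",
                            on_error=log_failure, on_success=log_success)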
def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None:
"""
Variables can be treated as numbers or strings if their type can be inferred -
however, that can be difficult, so instead, we can just treat them all as values
and be a bit looser on the typing we allow in our grammar. Here we just remove
all references to number and string from the grammar, replacing them with value.
"""
grammar_dictionary["string_set_vals"] = ['(value ws "," ws string_set_vals)', 'value']
grammar_dictionary["value"].remove('string')
grammar_dictionary["value"].remove('number')
grammar_dictionary["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws value)']
grammar_dictionary["expr"][1] = '(value wsp "LIKE" wsp value)'
del grammar_dictionary["string"]
del grammar_dictionary["number"] | 0.008235 |
def _get_download_output_manager_cls(self, transfer_future, osutil):
"""Retrieves a class for managing output for a download
:type transfer_future: s3transfer.futures.TransferFuture
:param transfer_future: The transfer future for the request
:type osutil: s3transfer.utils.OSUtils
:param osutil: The os utility associated to the transfer
:rtype: class of DownloadOutputManager
:returns: The appropriate class to use for managing a specific type of
input for downloads.
"""
download_manager_resolver_chain = [
DownloadSpecialFilenameOutputManager,
DownloadFilenameOutputManager,
DownloadSeekableOutputManager,
DownloadNonSeekableOutputManager,
]
fileobj = transfer_future.meta.call_args.fileobj
for download_manager_cls in download_manager_resolver_chain:
if download_manager_cls.is_compatible(fileobj, osutil):
return download_manager_cls
raise RuntimeError(
'Output %s of type: %s is not supported.' % (
fileobj, type(fileobj))) | 0.001735 |
def notifyObservers(self, arg=None):
'''If 'changed' indicates that this object
has changed, notify all its observers, then
call clearChanged(). Each observer has its
update() called with two arguments: this
observable object and the generic 'arg'.'''
self.mutex.acquire()
try:
if not self.changed:
return
# Make a local copy in case of synchronous
# additions of observers:
localArray = self.obs[:]
self.clearChanged()
finally:
self.mutex.release()
# Update observers
for observer in localArray:
observer.update(self, arg) | 0.004237 |
def traverse(self, node):
"""Traverse the document tree rooted at node.
        node : docutils node
current root node to traverse
"""
self.find_replace(node)
for c in node.children:
self.traverse(c) | 0.008889 |
def enable(self):
"""
        Enables the settings for the first nwin windows and disables the rest
"""
nwin = self.nwin.value()
for label, xs, ys, nx, ny in \
zip(self.label[:nwin], self.xs[:nwin], self.ys[:nwin],
self.nx[:nwin], self.ny[:nwin]):
label.config(state='normal')
xs.enable()
ys.enable()
nx.enable()
ny.enable()
for label, xs, ys, nx, ny in \
zip(self.label[nwin:], self.xs[nwin:], self.ys[nwin:],
self.nx[nwin:], self.ny[nwin:]):
label.config(state='disable')
xs.disable()
ys.disable()
nx.disable()
ny.disable()
self.nwin.enable()
self.xbin.enable()
self.ybin.enable()
self.sbutt.enable() | 0.002442 |
def dataframe(df, **kwargs):
"""Print table with data from the given pandas DataFrame
Parameters
----------
df : DataFrame
A pandas DataFrame with the table to print
"""
table(df.values, list(df.columns), **kwargs) | 0.004049 |
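A hedged usage sketch (assumptions: pandas is installed, and the module-level table() helper this function delegates to does the actual printing):

import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
dataframe(df)  # prints a two-column table with headers "name" and "value"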
def radial_symmetry(mesh):
"""
Check whether a mesh has rotational symmetry.
Returns
-----------
symmetry : None or str
None No rotational symmetry
'radial' Symmetric around an axis
'spherical' Symmetric around a point
axis : None or (3,) float
Rotation axis or point
section : None or (3, 2) float
If radial symmetry provide vectors
to get cross section
"""
# if not a volume this is meaningless
if not mesh.is_volume:
return None, None, None
# the sorted order of the principal components of inertia (3,) float
order = mesh.principal_inertia_components.argsort()
# we are checking if a geometry has radial symmetry
# if 2 of the PCI are equal, it is a revolved 2D profile
# if 3 of the PCI (all of them) are equal it is a sphere
# thus we take the diff of the sorted PCI, scale it as a ratio
# of the largest PCI, and then scale to the tolerance we care about
# if tol is 1e-3, that means that 2 components are identical if they
# are within .1% of the maximum PCI.
diff = np.abs(np.diff(mesh.principal_inertia_components[order]))
diff /= np.abs(mesh.principal_inertia_components).max()
# diffs that are within tol of zero
diff_zero = (diff / 1e-3).astype(int) == 0
if diff_zero.all():
# this is the case where all 3 PCI are identical
# this means that the geometry is symmetric about a point
# examples of this are a sphere, icosahedron, etc
axis = mesh.principal_inertia_vectors[0]
section = mesh.principal_inertia_vectors[1:]
return 'spherical', axis, section
elif diff_zero.any():
# this is the case for 2/3 PCI are identical
# this means the geometry is symmetric about an axis
# probably a revolved 2D profile
# we know that only 1/2 of the diff values are True
# if the first diff is 0, it means if we take the first element
# in the ordered PCI we will have one of the non- revolve axis
# if the second diff is 0, we take the last element of
# the ordered PCI for the section axis
# if we wanted the revolve axis we would just switch [0,-1] to
# [-1,0]
# since two vectors are the same, we know the middle
# one is one of those two
section_index = order[np.array([[0, 1],
[1, -1]])[diff_zero]].flatten()
section = mesh.principal_inertia_vectors[section_index]
# we know the rotation axis is the sole unique value
# and is either first or last of the sorted values
axis_index = order[np.array([-1, 0])[diff_zero]][0]
axis = mesh.principal_inertia_vectors[axis_index]
return 'radial', axis, section
return None, None, None | 0.000351 |
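For illustration (an assumption, not taken from the source): with trimesh available, a sphere-like mesh should report 'spherical' symmetry and a cylinder 'radial' symmetry.

import trimesh

kind, axis, section = radial_symmetry(trimesh.creation.icosphere())
print(kind)  # expected: 'spherical'
kind, axis, section = radial_symmetry(trimesh.creation.cylinder(radius=1.0, height=2.0))
print(kind)  # expected: 'radial'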
def read_component_sitemap(
self, sitemapindex_uri, sitemap_uri, sitemap, sitemapindex_is_file):
"""Read a component sitemap of a Resource List with index.
        Each component must be a sitemap, not a nested sitemapindex.
"""
if (sitemapindex_is_file):
if (not self.is_file_uri(sitemap_uri)):
# Attempt to map URI to local file
remote_uri = sitemap_uri
sitemap_uri = self.mapper.src_to_dst(remote_uri)
self.logger.info(
"Mapped %s to local file %s" %
(remote_uri, sitemap_uri))
else:
# The individual sitemaps should be at a URL (scheme/server/path)
# that the sitemapindex URL can speak authoritatively about
if (self.check_url_authority and
not UrlAuthority(sitemapindex_uri).has_authority_over(sitemap_uri)):
raise ListBaseIndexError(
"The sitemapindex (%s) refers to sitemap at a location it does not have authority over (%s)" %
(sitemapindex_uri, sitemap_uri))
try:
fh = URLopener().open(sitemap_uri)
self.num_files += 1
except IOError as e:
raise ListBaseIndexError(
"Failed to load sitemap from %s listed in sitemap index %s (%s)" %
(sitemap_uri, sitemapindex_uri, str(e)))
# Get the Content-Length if we can (works fine for local files)
try:
self.content_length = int(fh.info()['Content-Length'])
self.bytes_read += self.content_length
except KeyError:
# If we don't get a length then c'est la vie
pass
self.logger.info(
"Reading sitemap from %s (%d bytes)" %
(sitemap_uri, self.content_length))
component = sitemap.parse_xml(fh=fh, sitemapindex=False)
# Copy resources into self, check any metadata
for r in component:
self.resources.add(r) | 0.003395 |
def on_binop(self, node): # ('left', 'op', 'right')
"""Binary operator."""
return op2func(node.op)(self.run(node.left),
self.run(node.right)) | 0.010417 |
def _copy_gl_functions(source, dest, constants=False):
""" Inject all objects that start with 'gl' from the source
into the dest. source and dest can be dicts, modules or BaseGLProxy's.
"""
# Get dicts
if isinstance(source, BaseGLProxy):
s = {}
for key in dir(source):
s[key] = getattr(source, key)
source = s
elif not isinstance(source, dict):
source = source.__dict__
if not isinstance(dest, dict):
dest = dest.__dict__
# Copy names
funcnames = [name for name in source.keys() if name.startswith('gl')]
for name in funcnames:
dest[name] = source[name]
# Copy constants
if constants:
constnames = [name for name in source.keys() if name.startswith('GL_')]
for name in constnames:
dest[name] = source[name] | 0.001188 |
def add_next_tick_callback(self, callback, callback_id=None):
""" Adds a callback to be run on the next tick.
Returns an ID that can be used with remove_next_tick_callback."""
def wrapper(*args, **kwargs):
# this 'removed' flag is a hack because Tornado has no way
# to remove a "next tick" callback added with
# IOLoop.add_callback. So instead we make our wrapper skip
# invoking the callback.
if not wrapper.removed:
self.remove_next_tick_callback(callback_id)
return callback(*args, **kwargs)
else:
return None
wrapper.removed = False
def remover():
wrapper.removed = True
callback_id = self._assign_remover(callback, callback_id, self._next_tick_callback_removers, remover)
self._loop.add_callback(wrapper)
return callback_id | 0.003233 |
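The "removed" flag trick used above can be reproduced in isolation; a minimal sketch assuming only a scheduler that, like Tornado's add_callback, offers no way to unschedule a callback:

def make_cancellable(callback):
    def wrapper(*args, **kwargs):
        if wrapper.removed:          # skip the call instead of unscheduling it
            return None
        return callback(*args, **kwargs)
    wrapper.removed = False
    def remover():
        wrapper.removed = True       # "cancel" by flagging the wrapper
    return wrapper, remover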
def process_context_token(self, context_token):
"""
Provides a way to pass an asynchronous token to the security context, outside of the normal
context-establishment token passing flow. This method is not normally used, but some
example uses are:
* when the initiator's context is established successfully but the acceptor's context isn't
and the acceptor needs to signal to the initiator that the context shouldn't be used.
* if :meth:`delete` on one peer's context returns a final token that can be passed to the
other peer to indicate the other peer's context should be torn down as well (though it's
recommended that :meth:`delete` should return nothing, i.e. this method should not be
used by GSSAPI mechanisms).
:param context_token: The context token to pass to the security context
:type context_token: bytes
:raises: :exc:`~gssapi.error.DefectiveToken` if consistency checks on the token failed.
:exc:`~gssapi.error.NoContext` if this context is invalid.
:exc:`~gssapi.error.GSSException` for any other GSSAPI errors.
"""
minor_status = ffi.new('OM_uint32[1]')
context_token_buffer = ffi.new('gss_buffer_desc[1]')
context_token_buffer[0].length = len(context_token)
c_str_context_token = ffi.new('char[]', context_token)
context_token_buffer[0].value = c_str_context_token
retval = C.gss_process_context_token(
minor_status,
self._ctx[0],
context_token_buffer
)
if GSS_ERROR(retval):
if minor_status[0] and self.mech_type:
raise _exception_for_status(retval, minor_status[0], self.mech_type)
else:
raise _exception_for_status(retval, minor_status[0]) | 0.005911 |
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if isinstance(value, dict):
return value
if self.blank and not value:
return None
if isinstance(value, string_types):
try:
return json.loads(value)
except Exception as e:
raise ValidationError(str(e))
return value | 0.003795 |
def handle(): # pragma: no cover
"""
Main program execution handler.
"""
try:
cli = ZappaCLI()
sys.exit(cli.handle())
except SystemExit as e: # pragma: no cover
cli.on_exit()
sys.exit(e.code)
except KeyboardInterrupt: # pragma: no cover
cli.on_exit()
sys.exit(130)
except Exception as e:
cli.on_exit()
click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(")
click.echo("\n==============\n")
import traceback
traceback.print_exc()
click.echo("\n==============\n")
shamelessly_promote()
sys.exit(-1) | 0.00744 |
def fix_varscan_output(line, normal_name="", tumor_name=""):
"""Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
output as strings to by Floats: FREQ, SSC.
This function was contributed by Sean Davis <[email protected]>,
with minor modifications by Luca Beltrame <[email protected]>.
"""
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if(line.startswith("##")):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line
line = line.split("\t")
if line[0].startswith("#CHROM"):
if tumor_name and normal_name:
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line)
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples)
else:
return "\t".join(line)
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line)
def _normalize_freq(line, sample_i):
"""Ensure FREQ genotype value is float as defined in header.
"""
ft_parts = line[8].split(":")
dat = line[sample_i].split(":")
# Non-conforming no-call sample, don't try to fix FREQ
if len(dat) != len(ft_parts):
return line
freq_i = ft_parts.index("FREQ")
try:
dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
except ValueError: # illegal binary characters -- set frequency to zero
dat[freq_i] = "0.0"
line[sample_i] = ":".join(dat)
return line
if len(line) > 9:
line = _normalize_freq(line, 9)
if len(line) > 10:
line = _normalize_freq(line, 10)
# HACK: The position of the SS= changes, so we just search for it
ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
if len(ss_vals) > 0:
somatic_status = int(ss_vals[0].split("=")[1]) # Get the number
else:
somatic_status = None
if somatic_status == 5:
# "Unknown" states are broken in current versions of VarScan
# so we just bail out here for now
return
# fix FREQ for any additional samples -- multi-sample VarScan calling
if len(line) > 11:
for i in range(11, len(line)):
line = _normalize_freq(line, i)
#FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
# This is not handled yet.
if "+" in ALT or "-" in ALT:
if "/" not in ALT:
if ALT[0] == "+":
R = REF
A = REF + ALT[1:]
elif ALT[0] == "-":
R = REF + ALT[1:]
A = REF
else:
Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
if len(Del):
REF += sorted(Del, key=lambda x: len(x))[-1]
A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
for p in Del] + [REF + p for p in Ins])
R = REF
REF = R
ALT = A
else:
ALT = ALT.replace('/', ',')
line[3] = REF
line[4] = ALT
return "\t".join(line) | 0.001653 |
def open( self, **kwargs ):
"""Append an opening tag."""
if self.tag in self.parent.twotags or self.tag in self.parent.onetags:
self.render( self.tag, False, None, kwargs )
elif self.mode == 'strict_html' and self.tag in self.parent.deptags:
raise DeprecationError( self.tag ) | 0.024615 |
def get_options(argv=None):
"""
Convert options into commands
return commands, message
"""
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument('--new-instance', action='store_true', default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)")
parser.add_argument('--defaults', dest="reset_to_defaults",
action='store_true', default=False,
help="Reset configuration settings to defaults")
parser.add_argument('--reset', dest="reset_config_files",
action='store_true', default=False,
help="Remove all configuration files!")
parser.add_argument('--optimize', action='store_true', default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)")
parser.add_argument('-w', '--workdir', dest="working_directory", default=None,
help="Default working directory")
parser.add_argument('--hide-console', action='store_true', default=False,
help="Hide parent console window (Windows)")
parser.add_argument('--show-console', action='store_true', default=False,
help="(Deprecated) Does nothing, now the default behavior "
"is to show the console")
parser.add_argument('--multithread', dest="multithreaded",
action='store_true', default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)")
parser.add_argument('--profile', action='store_true', default=False,
help="Profile mode (internal test, "
"not related with Python profiling)")
parser.add_argument('--window-title', type=str, default=None,
help="String to show in the main window title")
parser.add_argument('-p', '--project', default=None, type=str,
dest="project",
help="Path that contains an Spyder project")
parser.add_argument('--opengl', default=None,
dest="opengl_implementation",
choices=['software', 'desktop', 'gles'],
help=("OpenGL implementation to pass to Qt")
)
parser.add_argument('--debug-info', default=None,
dest="debug_info",
choices=['minimal', 'verbose'],
help=("Level of internal debugging info to give. "
"'minimal' only logs a small amount of "
"confirmation messages and 'verbose' logs a "
"lot of detailed information.")
)
parser.add_argument('--debug-output', default='terminal',
dest="debug_output",
choices=['terminal', 'file'],
help=("Print internal debugging info either to the "
"terminal or to a file called spyder-debug.log "
"in your current working directory. Default is "
"'terminal'.")
)
parser.add_argument('files', nargs='*')
options = parser.parse_args(argv)
args = options.files
return options, args | 0.007013 |
def get_unique_document_id(query_str):
# type: (str) -> str
"""Get a unique id given a query_string"""
assert isinstance(query_str, string_types), (
"Must receive a string as query_str. Received {}"
).format(repr(query_str))
if query_str not in _cached_queries:
_cached_queries[query_str] = sha1(str(query_str).encode("utf-8")).hexdigest()
return _cached_queries[query_str] | 0.004831 |
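Usage sketch: repeated calls with the same query string are served from the module-level cache and return the same digest (assumption: _cached_queries starts out as an empty dict).

first = get_unique_document_id("SELECT * FROM users")
second = get_unique_document_id("SELECT * FROM users")
assert first == second   # second call is served from the cache
assert len(first) == 40  # sha1 hex digest
# get_unique_document_id(42) would raise AssertionError: ids are built from strings only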
def cluster_node_present(name, node, extra_args=None):
'''
Add a node to the Pacemaker cluster via PCS
Should be run on one cluster node only
(there may be races)
Can only be run on a already setup/added node
name
Irrelevant, not used (recommended: pcs_setup__node_add_{{node}})
node
node that should be added
extra_args
list of extra args for the \'pcs cluster node add\' command
Example:
.. code-block:: yaml
pcs_setup__node_add_node1.example.com:
pcs.cluster_node_present:
- node: node1.example.com
- extra_args:
- '--start'
- '--enable'
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
node_add_required = True
current_nodes = []
is_member_cmd = ['pcs', 'status', 'nodes', 'corosync']
is_member = __salt__['cmd.run_all'](is_member_cmd, output_loglevel='trace', python_shell=False)
log.trace('Output of pcs status nodes corosync: %s', is_member)
for line in is_member['stdout'].splitlines():
try:
key, value = [x.strip() for x in line.split(':')]
except ValueError:
continue
else:
if not value or key not in ('Offline', 'Online'):
continue
values = value.split(':')
if node in values:
node_add_required = False
ret['comment'] += 'Node {0} is already member of the cluster\n'.format(node)
else:
current_nodes += values
if not node_add_required:
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] += 'Node {0} is set to be added to the cluster\n'.format(node)
return ret
if not isinstance(extra_args, (list, tuple)):
extra_args = []
node_add = __salt__['pcs.cluster_node_add'](node=node, extra_args=extra_args)
log.trace('Output of pcs.cluster_node_add: %s', node_add)
node_add_dict = {}
for line in node_add['stdout'].splitlines():
log.trace('line: %s', line)
log.trace('line.split(:).len: %s', len(line.split(':')))
if len(line.split(':')) in [2]:
current_node = line.split(':')[0].strip()
current_node_add_state = line.split(':')[1].strip()
if current_node in current_nodes + [node]:
node_add_dict.update({current_node: current_node_add_state})
log.trace('node_add_dict: %s', node_add_dict)
for current_node in current_nodes:
if current_node in node_add_dict:
if node_add_dict[current_node] not in ['Corosync updated']:
ret['result'] = False
ret['comment'] += 'Failed to update corosync.conf on node {0}\n'.format(current_node)
ret['comment'] += '{0}: node_add_dict: {1}\n'.format(current_node, node_add_dict[current_node])
else:
ret['result'] = False
ret['comment'] += 'Failed to update corosync.conf on node {0}\n'.format(current_node)
if node in node_add_dict and node_add_dict[node] in ['Succeeded', 'Success']:
ret['comment'] += 'Added node {0}\n'.format(node)
ret['changes'].update({node: {'old': '', 'new': 'Added'}})
else:
ret['result'] = False
        ret['comment'] += 'Failed to add node {0}\n'.format(node)
if node in node_add_dict:
ret['comment'] += '{0}: node_add_dict: {1}\n'.format(node, node_add_dict[node])
ret['comment'] += six.text_type(node_add)
log.trace('ret: %s', ret)
return ret | 0.002756 |
def specialize(self, domain):
"""Specialize ``self`` to a concrete domain.
"""
if domain == self.domain:
return self
return type(self)(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset.specialize(domain),
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
) | 0.004808 |
def remove(path, force=False):
'''
Remove file or directory (recursively), if it exists.
On NFS file systems, if a directory contains :file:`.nfs*` temporary files
(sometimes created when deleting a file), it waits for them to go away.
Parameters
----------
path : ~pathlib.Path
Path to remove.
force : bool
If True, will remove files and directories even if they are read-only
(as if first doing ``chmod -R +w``).
'''
if not path.exists():
return
else:
if force:
with suppress(FileNotFoundError):
chmod(path, 0o700, '+', recursive=True)
if path.is_dir() and not path.is_symlink():
# Note: shutil.rmtree did not handle NFS well
# First remove all files
for dir_, dirs, files in os.walk(str(path), topdown=False): # bottom-up walk
dir_ = Path(dir_)
for file in files:
with suppress(FileNotFoundError):
(dir_ / file).unlink()
for file in dirs: # Note: os.walk treats symlinks to directories as directories
file = dir_ / file
if file.is_symlink():
with suppress(FileNotFoundError):
file.unlink()
# Now remove all dirs, being careful of any lingering .nfs* files
for dir_, _, _ in os.walk(str(path), topdown=False): # bottom-up walk
dir_ = Path(dir_)
with suppress(FileNotFoundError):
# wait for .nfs* files
children = list(dir_.iterdir())
while children:
# only wait for nfs temporary files
if any(not child.name.startswith('.nfs') for child in children):
dir_.rmdir() # raises dir not empty
# wait and go again
time.sleep(.1)
children = list(dir_.iterdir())
# rm
dir_.rmdir()
else:
with suppress(FileNotFoundError):
path.unlink() | 0.003159 |
def add_sources_from_roi(self, names, roi, free=False, **kwargs):
"""Add multiple sources to the current ROI model copied from another ROI model.
Parameters
----------
names : list
List of str source names to add.
roi : `~fermipy.roi_model.ROIModel` object
The roi model from which to add sources.
free : bool
            Initialize the source with a free normalization parameter.
"""
for name in names:
self.add_source(name, roi[name].data, free=free, **kwargs) | 0.005291 |
def p_while_start(p):
""" while_start : WHILE expr
"""
p[0] = p[2]
gl.LOOPS.append(('WHILE',))
if is_number(p[2]) and not p[2].value:
api.errmsg.warning_condition_is_always(p.lineno(1)) | 0.004695 |
def people(self):
"""
Retrieve all people of the company
:return: list of people objects
:rtype: list
"""
return fields.ListField(name=HightonConstants.PEOPLE, init_class=Person).decode(
self.element_from_string(
self._get_request(
endpoint=self.ENDPOINT + '/' + str(self.id) + '/people',
).text
)
) | 0.006944 |
def process(self, node, type):
"""
Process an object graph representation of the xml L{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@param type: The I{optional} schema type.
@type type: L{xsd.sxbase.SchemaObject}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
content.type = type
return Core.process(self, content) | 0.004386 |
def semicolon(f):
"""Add a semicolon to the result of a visit_* call.
Parameters
----------
f : callable
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs) + ';'
return wrapper | 0.004032 |
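Usage sketch with a hypothetical visit_* function (assumption: visit_* callables return SQL text as plain strings):

@semicolon
def visit_drop_table(name):
    return "DROP TABLE {}".format(name)

assert visit_drop_table("users") == "DROP TABLE users;"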
def assert_valid(self, instance, value=None):
"""Checks if valid, including HasProperty instances pass validation"""
valid = super(Instance, self).assert_valid(instance, value)
if not valid:
return False
if value is None:
value = instance._get(self.name)
if isinstance(value, HasProperties):
value.validate()
return True | 0.00495 |
def object_merge(old, new, unique=False):
"""
Recursively merge two data structures.
:param unique: When set to True existing list items are not set.
"""
if isinstance(old, list) and isinstance(new, list):
if old == new:
return
for item in old[::-1]:
if unique and item in new:
continue
new.insert(0, item)
if isinstance(old, dict) and isinstance(new, dict):
for key, value in old.items():
if key not in new:
new[key] = value
else:
object_merge(value, new[key]) | 0.001618 |
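Usage sketch showing the in-place merge semantics (the second argument is mutated):

old_cfg = {"host": "localhost", "port": 8000}
new_cfg = {"port": 9000}
object_merge(old_cfg, new_cfg)
assert new_cfg == {"host": "localhost", "port": 9000}

old_tags = ["x", "y"]
new_tags = ["y", "z"]
object_merge(old_tags, new_tags, unique=True)
assert new_tags == ["x", "y", "z"]  # "y" is skipped because unique=True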
def create_team(self, name):
''' Creates a new Team
Creates a new Team and makes you a member. You must not currently belong to a team to invoke.
Args:
name (str): The name of your team
Returns:
A Team object
'''
request = self._get_request()
return request.post(self.TEAM_CREATE_URL, {"name": name}) | 0.007792 |
def export_cached(self):
"""Export cached remote functions
        Note: this should be called only once, when the worker is connected.
"""
for remote_function in self._functions_to_export:
self._do_export(remote_function)
self._functions_to_export = None
for info in self._actors_to_export:
(key, actor_class_info) = info
self._publish_actor_class_to_key(key, actor_class_info) | 0.004444 |