text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An IOError is
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
if ismodule(object): return lines, 0
else: return getblock(lines[lnum:]), lnum + 1 | 0.005119 |
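A minimal usage sketch; this function lives in the standard-library inspect module, so the stdlib version is used here for illustration:
import inspect

def example():
    return 42

lines, start = inspect.getsourcelines(example)
print(start)            # line number in the source file where `def example():` begins
print(''.join(lines))   # the function's source text as one string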
def XMPP_display(self,*arg):
""" For XMPP Demo
        Output format for XMPP.
"""
MA = ''
for i in arg:
MAs = '- MA%02s: %.2f %s(%s)\n' % (
unicode(i),
self.MA(i),
self.MAC(i),
unicode(self.MA_serial(i)[0])
)
MA = MA + MAs
vol = '- Volume: %s %s(%s)' % (
unicode(self.MAVOL(1)/1000),
unicode(self.MACVOL(1)),
unicode(self.MAVOL_serial(1)[0])
)
MAO = self.MAO(3,6)
re = """%(stock_name)s %(stock_no)s
%(stock_date)s: %(stock_price)s %(stock_range)s(%(range_per)+.2f%%)
%(MA)s%(vol)s
- MAO(3-6): %(MAO_v).2f %(MAO_c)s(%(MAO_times)s)
- RABC: %(RABC)s""" % {
'stock_name': unicode(self.stock_name),
'stock_no': unicode(self.stock_no),
'stock_date': unicode(self.data_date[-1]),
'stock_price': unicode(self.raw_data[-1]),
'stock_range': unicode(self.stock_range[-1]),
'range_per': self.range_per,
'MA': MA,
'vol': vol,
'MAO_v': MAO[0][1][-1],
'MAO_c': unicode(MAO[1]),
'MAO_times': unicode(MAO[0][0]),
'RABC': self.RABC
}
return re | 0.004433 |
def as_completed(objects, count=None, timeout=None):
"""Wait for one or more waitable objects, yielding them as they become
ready.
This is the iterator/generator version of :func:`wait`.
"""
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return
pending = list(objects)
for obj in _wait(pending, timeout):
yield obj
count -= 1
if count == 0:
break | 0.001443 |
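A hypothetical consumption pattern for the generator above; the waitable objects and their result() accessor are assumptions, not part of the snippet:
# Process up to two results as they become ready, giving up after five seconds
# (assumes each waitable object also exposes a result() method).
for obj in as_completed(objects, count=2, timeout=5.0):
    print(obj.result())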
def reassign_log_entry_to_log(self, log_entry_id, from_log_id, to_log_id):
"""Moves a ``LogEntry`` from one ``Log`` to another.
Mappings to other ``Logs`` are unaffected.
arg: log_entry_id (osid.id.Id): the ``Id`` of the
``LogEntry``
arg: from_log_id (osid.id.Id): the ``Id`` of the current
``Log``
arg: to_log_id (osid.id.Id): the ``Id`` of the destination
``Log``
raise: NotFound - ``log_entry_id, from_log_id,`` or
``to_log_id`` not found or ``log_entry_id`` not mapped
to ``from_log_id``
raise: NullArgument - ``log_entry_id, from_log_id,`` or
``to_log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.reassign_resource_to_bin
self.assign_log_entry_to_log(log_entry_id, to_log_id)
try:
self.unassign_log_entry_from_log(log_entry_id, from_log_id)
except: # something went wrong, roll back assignment to to_log_id
self.unassign_log_entry_from_log(log_entry_id, to_log_id)
raise | 0.002198 |
def _eta_from_phi(self):
"""Update `eta` using current `phi`."""
self.eta = scipy.ndarray(N_NT - 1, dtype='float')
etaprod = 1.0
for w in range(N_NT - 1):
self.eta[w] = 1.0 - self.phi[w] / etaprod
etaprod *= self.eta[w]
_checkParam('eta', self.eta, self.PARAMLIMITS, self.PARAMTYPES) | 0.005764 |
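A self-contained sketch of the same transformation with plain numpy, assuming phi holds the first N_NT - 1 nucleotide frequencies; the recurrence is eta[w] = 1 - phi[w] / prod(eta[:w]):
import numpy as np

def eta_from_phi(phi):
    """Stand-alone version of the update above."""
    eta = np.empty(len(phi), dtype=float)
    etaprod = 1.0
    for w in range(len(phi)):
        eta[w] = 1.0 - phi[w] / etaprod   # divide by the running product of earlier etas
        etaprod *= eta[w]
    return eta

print(eta_from_phi([0.25, 0.25, 0.25]))   # [0.75, 0.6667, 0.5] for uniform frequencies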
def after_install(options, home_dir):
# --- CUT here ---
"""
called after virtualenv was created and pip/setuptools installed.
    Now we install the required libs/packages.
"""
if options.install_type==INST_PYPI:
requirements=NORMAL_INSTALLATION
elif options.install_type==INST_GIT:
requirements=GIT_READONLY_INSTALLATION
elif options.install_type==INST_DEV:
requirements=DEVELOPER_INSTALLATION
else:
# Should never happen
raise RuntimeError("Install type %r unknown?!?" % options.install_type)
env_subprocess = EnvSubprocess(home_dir) # from bootstrap_env.bootstrap_install_pip
logfile = os.path.join(env_subprocess.abs_home_dir, "install.log")
for requirement in requirements:
sys.stdout.write("\n\nInstall %r:\n" % requirement)
env_subprocess.call_env_pip(["install", "--log=%s" % logfile, requirement])
sys.stdout.write("\n") | 0.010616 |
def _adjustSectionAlignment(self, value, fileAlignment, sectionAlignment):
"""
Align a value to C{SectionAligment}.
@type value: int
@param value: The value to be aligned.
@type fileAlignment: int
@param fileAlignment: The value to be used as C{FileAlignment}.
@type sectionAlignment: int
@param sectionAlignment: The value to be used as C{SectionAlignment}.
@rtype: int
@return: The aligned value.
"""
if fileAlignment < consts.DEFAULT_FILE_ALIGNMENT:
            if fileAlignment != sectionAlignment:
print "FileAlignment does not match SectionAlignment."
if sectionAlignment < consts.DEFAULT_PAGE_SIZE:
sectionAlignment = fileAlignment
if sectionAlignment and value % sectionAlignment:
return sectionAlignment * ((value / sectionAlignment) + 1)
return value | 0.009202 |
def get_upcoming_events(self):
        """Retrieves a list of Notification_Occurrence_Events that have not ended yet
:return: SoftLayer_Notification_Occurrence_Event
"""
mask = "mask[id, subject, startDate, endDate, statusCode, acknowledgedFlag, impactedResourceCount, updateCount]"
_filter = {
'endDate': {
'operation': '> sysdate'
},
'startDate': {
'operation': 'orderBy',
'options': [{
'name': 'sort',
'value': ['ASC']
}]
}
}
return self.client.call('Notification_Occurrence_Event', 'getAllObjects', filter=_filter, mask=mask, iter=True) | 0.006748 |
def __search_files(self, files):
"""
Searches in given files.
:param files: Files.
:type files: list
"""
for file in files:
if self.__interrupt:
return
if not foundations.common.path_exists(file):
continue
if foundations.io.is_readable(file):
if foundations.io.is_binary_file(file):
continue
LOGGER.info("{0} | Searching '{1}' file!".format(self.__class__.__name__, file))
cache_data = self.__container.files_cache.get_content(file)
if not cache_data:
reader = foundations.io.File(file)
content = reader.read()
if content is None:
                        LOGGER.warning("!> Error occurred while reading '{0}' file, proceeding to next one!".format(file))
continue
self.__container.files_cache.add_content(**{file: CacheData(content=content, document=None)})
else:
content = cache_data.content
occurrences = self.__search_document(QTextDocument(QString(content)), self.__pattern, self.__settings)
occurrences and self.__search_results.append(SearchResult(file=file,
pattern=self.__pattern,
settings=self.__settings,
occurrences=occurrences)) | 0.006361 |
def registration_success(self, stanza):
"""Handle registration success.
[client only]
Clean up registration stuff, change state to "registered" and initialize
authentication.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `pyxmpp.iq.Iq`"""
_unused = stanza
self.lock.acquire()
try:
self.state_change("registered", self.registration_form)
if ('FORM_TYPE' in self.registration_form
and self.registration_form['FORM_TYPE'].value == 'jabber:iq:register'):
if 'username' in self.registration_form:
self.my_jid = JID(self.registration_form['username'].value,
self.my_jid.domain, self.my_jid.resource)
if 'password' in self.registration_form:
self.password = self.registration_form['password'].value
self.registration_callback = None
self._post_connect()
finally:
self.lock.release() | 0.004621 |
def cmd_msg(self, args):
'''control behaviour of the module'''
        if len(args) == 0:
            print(self.usage())
            return
txt = ' '.join(args)
self.master.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_NOTICE,
txt) | 0.007143 |
def backward(self, diff_x, influences, activations, **kwargs):
"""
Backward pass through the network, including update.
Parameters
----------
diff_x : numpy array
A matrix containing the differences between the input and neurons.
influences : numpy array
A matrix containing the influence each neuron has on each
other neuron. This is used to calculate the updates.
activations : numpy array
The activations each neuron has to each data point. This is used
to calculate the BMU.
differency_y : numpy array
The differences between the input and context neurons.
Returns
-------
updates : tuple of arrays
The updates to the weights and context weights, respectively.
"""
diff_y = kwargs['diff_y']
bmu = self._get_bmu(activations)
influence = influences[bmu]
# Update
x_update = np.multiply(diff_x, influence)
y_update = np.multiply(diff_y, influence)
return x_update, y_update | 0.001794 |
def resizeAllSections(header, sectionSize):
""" Sets all sections (columns or rows) of a header to the same section size.
:param header: a QHeaderView
:param sectionSize: the new size of the header section in pixels
"""
for idx in range(header.length()):
header.resizeSection(idx, sectionSize) | 0.006061 |
def get_cert_serial(cert_file):
'''
Get the serial number of a certificate file
cert_file
The certificate file to find the serial for
CLI Example:
.. code-block:: bash
salt '*' certutil.get_cert_serial <certificate name>
'''
cmd = "certutil.exe -silent -verify {0}".format(cert_file)
out = __salt__['cmd.run'](cmd)
# match serial number by paragraph to work with multiple languages
matches = re.search(r":\s*(\w*)\r\n\r\n", out)
if matches is not None:
return matches.groups()[0].strip()
else:
return None | 0.001698 |
def getyrdoy(date):
"""Return a tuple of year, day of year for a supplied datetime object."""
try:
doy = date.toordinal()-datetime(date.year,1,1).toordinal()+1
except AttributeError:
raise AttributeError("Must supply a pandas datetime object or " +
"equivalent")
else:
return date.year, doy | 0.008333 |
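A quick usage check, assuming `datetime` in the snippet refers to `datetime.datetime`:
from datetime import datetime
print(getyrdoy(datetime(2020, 3, 1)))   # (2020, 61) -- 2020 is a leap year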
def add_text_item(self, collection_uri, name, metadata, text, title=None):
"""Add a new item to a collection containing a single
text document.
The full text of the text document is specified as the text
argument and will be stored with the same name as the
item and a .txt extension.
This is a shorthand for the more general add_item method.
:param collection_uri: The URI that references the collection
:type collection_uri: String
:param name: The item name, suitable for use in a URI (no spaces)
:type name: String
:param metadata: a dictionary of metadata values describing the item
:type metadata: Dict
:param text: the full text of the document associated with this item
:type text: String
:param title: document title, defaults to the item name
:type title: String
:rtype String
:returns: the URI of the created item
:raises: APIError if the request was not successful
"""
docname = name + ".txt"
if title is None:
title = name
metadata['dcterms:identifier'] = name
metadata['@type'] = 'ausnc:AusNCObject'
metadata['hcsvlab:display_document'] = {'@id': docname}
metadata['hcsvlab:indexable_document'] = {'@id': docname}
metadata['ausnc:document'] = [{ '@id': 'document1.txt',
'@type': 'foaf:Document',
'dcterms:extent': len(text),
'dcterms:identifier': docname,
'dcterms:title': title,
'dcterms:type': 'Text'}]
meta = {'items': [{'metadata': { '@context': self.context,
'@graph': [metadata]
},
'documents': [{'content': text, 'identifier': docname}]
}]
}
response = self.api_request(collection_uri, method='POST', data=json.dumps(meta))
# this will raise an exception if the request fails
self.__check_success(response)
item_uri = collection_uri + "/" + response['success'][0]
return item_uri | 0.004254 |
def subvolume_get_default(path):
'''
Get the default subvolume of the filesystem path
path
Mount point for the subvolume
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_get_default /var/volumes/tmp
'''
cmd = ['btrfs', 'subvolume', 'get-default', path]
res = __salt__['cmd.run_all'](cmd)
salt.utils.fsutils._verify_run(res)
line = res['stdout'].strip()
# The ID is the second parameter, and the name the last one, or
# '(FS_TREE)'
#
# When the default one is set:
# ID 5 (FS_TREE)
#
# When we manually set a different one (var):
# ID 257 gen 8 top level 5 path var
#
id_ = line.split()[1]
name = line.split()[-1]
return {
'id': id_,
'name': name,
} | 0.001267 |
def _calc_snr(volume,
mask,
dilation=5,
reference_tr=None,
              ):
    """ Calculate the SNR of a volume
Calculates the Signal to Noise Ratio, the mean of brain voxels
divided by the standard deviation across non-brain voxels. Specify a TR
value to calculate the mean and standard deviation for that TR. To
calculate the standard deviation of non-brain voxels we can subtract
any baseline structure away first, hence getting at deviations due to the
system noise and not something like high baseline values in non-brain
parts of the body.
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
dilation : int
How many binary dilations do you want to perform on the mask to
determine the non-brain voxels. If you increase this the SNR
increases and the non-brain voxels (after baseline subtraction) more
closely resemble a gaussian
reference_tr : int or list
Specifies the TR to calculate the SNR for. If multiple are supplied
then it will use the average of them.
Returns
-------
snr : float
The SNR of the volume
"""
# If no TR is specified then take all of them
if reference_tr is None:
reference_tr = list(range(volume.shape[3]))
# Dilate the mask in order to ensure that non-brain voxels are far from
# the brain
if dilation > 0:
mask_dilated = ndimage.morphology.binary_dilation(mask,
iterations=dilation)
else:
mask_dilated = mask
# Make a matrix of brain and non_brain voxels, selecting the timepoint/s
brain_voxels = volume[mask > 0][:, reference_tr]
nonbrain_voxels = (volume[:, :, :, reference_tr]).astype('float64')
# If you have multiple TRs
if len(brain_voxels.shape) > 1:
brain_voxels = np.mean(brain_voxels, 1)
nonbrain_voxels = np.mean(nonbrain_voxels, 3)
nonbrain_voxels = nonbrain_voxels[mask_dilated == 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels)
# Find the standard deviation of the voxels
std_voxels = np.nanstd(nonbrain_voxels)
# Return the snr
return mean_voxels / std_voxels | 0.000416 |
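The definition itself is simple; a toy illustration with made-up voxel values (not a substitute for the masking and dilation logic above):
import numpy as np
brain_voxels = np.array([100.0, 102.0, 98.0])        # mean signal inside the mask
nonbrain_voxels = np.array([1.0, -1.0, 2.0, -2.0])   # residual noise outside the dilated mask
snr = np.nanmean(brain_voxels) / np.nanstd(nonbrain_voxels)
print(snr)   # ~63.2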
def copy_dir(bucket_name, src_path, dest_path,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None,
surrogate_key=None, cache_control=None,
surrogate_control=None,
create_directory_redirect_object=True):
"""Copy objects from one directory in a bucket to another directory in
the same bucket.
Object metadata is preserved while copying, with the following exceptions:
- If a new surrogate key is provided it will replace the original one.
- If ``cache_control`` and ``surrogate_control`` values are provided they
will replace the old one.
Parameters
----------
bucket_name : `str`
Name of an S3 bucket.
src_path : `str`
Source directory in the S3 bucket. The ``src_path`` should ideally end
in a trailing `'/'`. E.g. `'dir/dir2/'`.
dest_path : `str`
Destination directory in the S3 bucket. The ``dest_path`` should
ideally end in a trailing `'/'`. E.g. `'dir/dir2/'`. The destination
path cannot contain the source path.
aws_access_key_id : `str`
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
surrogate_key : `str`, optional
The surrogate key to insert in the header of all objects in the
``x-amz-meta-surrogate-key`` field. This key is used to purge
builds from the Fastly CDN when Editions change.
If `None` then no header will be set.
If the object already has a ``x-amz-meta-surrogate-key`` header then
it will be replaced.
cache_control : `str`, optional
This sets (and overrides) the ``Cache-Control`` header on the copied
files. The ``Cache-Control`` header specifically dictates how content
is cached by the browser (if ``surrogate_control`` is also set).
surrogate_control : `str`, optional
This sets (and overrides) the ``x-amz-meta-surrogate-control`` header
on the copied files. The ``Surrogate-Control``
or ``x-amz-meta-surrogate-control`` header is used in priority by
        Fastly to govern its caching. This caching policy is *not* passed
to the browser.
create_directory_redirect_object : `bool`, optional
Create a directory redirect object for the root directory. The
directory redirect object is an empty S3 object named after the
directory (without a trailing slash) that contains a
``x-amz-meta-dir-redirect=true`` HTTP header. LSST the Docs' Fastly
VCL is configured to redirect requests for a directory path to the
directory's ``index.html`` (known as *courtesy redirects*).
Raises
------
ltdconveyor.s3.S3Error
Thrown by any unexpected faults from the S3 API.
RuntimeError
Thrown when the source and destination directories are the same.
"""
if not src_path.endswith('/'):
src_path += '/'
if not dest_path.endswith('/'):
dest_path += '/'
# Ensure the src_path and dest_path don't contain each other
common_prefix = os.path.commonprefix([src_path, dest_path])
if common_prefix == src_path:
msg = 'Common prefix {0} is same as source dir {1}'.format(
common_prefix, src_path)
raise RuntimeError(msg)
if common_prefix == dest_path:
msg = 'Common prefix {0} is same as dest dir {1}'.format(
common_prefix, dest_path)
raise RuntimeError(msg)
# Delete any existing objects in the destination
delete_dir(bucket_name, dest_path,
aws_access_key_id, aws_secret_access_key)
session = boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
profile_name=aws_profile)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
# Copy each object from source to destination
for src_obj in bucket.objects.filter(Prefix=src_path):
src_rel_path = os.path.relpath(src_obj.key, start=src_path)
dest_key_path = os.path.join(dest_path, src_rel_path)
# the src_obj (ObjectSummary) doesn't include headers afaik
head = s3.meta.client.head_object(Bucket=bucket_name,
Key=src_obj.key)
metadata = head['Metadata']
content_type = head['ContentType']
# try to use original Cache-Control header if new one is not set
if cache_control is None and 'CacheControl' in head:
cache_control = head['CacheControl']
if surrogate_control is not None:
metadata['surrogate-control'] = surrogate_control
if surrogate_key is not None:
metadata['surrogate-key'] = surrogate_key
s3.meta.client.copy_object(
Bucket=bucket_name,
Key=dest_key_path,
CopySource={'Bucket': bucket_name, 'Key': src_obj.key},
MetadataDirective='REPLACE',
Metadata=metadata,
ACL='public-read',
CacheControl=cache_control,
ContentType=content_type)
if create_directory_redirect_object:
dest_dirname = dest_path.rstrip('/')
obj = bucket.Object(dest_dirname)
metadata = {'dir-redirect': 'true'}
obj.put(Body='',
ACL='public-read',
Metadata=metadata,
CacheControl=cache_control) | 0.000175 |
def plural(text):
"""
>>> plural('activity')
'activities'
"""
aberrant = {
'knife': 'knives',
'self': 'selves',
'elf': 'elves',
'life': 'lives',
'hoof': 'hooves',
'leaf': 'leaves',
'echo': 'echoes',
'embargo': 'embargoes',
'hero': 'heroes',
'potato': 'potatoes',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'child': 'children',
'woman': 'women',
'man': 'men',
'person': 'people',
'goose': 'geese',
'mouse': 'mice',
'barracks': 'barracks',
'deer': 'deer',
'nucleus': 'nuclei',
'syllabus': 'syllabi',
'focus': 'foci',
'fungus': 'fungi',
'cactus': 'cacti',
'phenomenon': 'phenomena',
'index': 'indices',
'appendix': 'appendices',
'criterion': 'criteria',
}
if text in aberrant:
result = '%s' % aberrant[text]
else:
postfix = 's'
if len(text) > 2:
vowels = 'aeiou'
if text[-2:] in ('ch', 'sh'):
postfix = 'es'
elif text[-1:] == 'y':
if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):
postfix = 's'
else:
postfix = 'ies'
text = text[:-1]
elif text[-2:] == 'is':
postfix = 'es'
text = text[:-2]
elif text[-1:] in ('s', 'z', 'x'):
postfix = 'es'
result = '%s%s' % (text, postfix)
return result | 0.001206 |
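A few more cases that exercise the different branches (the 'es' ending after sibilants, '-is' to '-es', 'y' to 'ies', and the aberrant table):
>>> plural('box')
'boxes'
>>> plural('analysis')
'analyses'
>>> plural('city')
'cities'
>>> plural('criterion')
'criteria'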
def set(self, data, size):
"""
Set chunk data from user-supplied data; truncate if too large. Data may
be null. Returns actual size of chunk
"""
return lib.zchunk_set(self._as_parameter_, data, size) | 0.008658 |
def serialize(self, config):
"""
:param config:
:type config: yowsup.config.base.config.Config
:return:
:rtype: bytes
"""
for transform in self._transforms:
config = transform.transform(config)
return config | 0.007067 |
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is build
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context) | 0.003086 |
def produce_csv_output(filehandle: TextIO,
fields: Sequence[str],
values: Iterable[str]) -> None:
"""
Produce CSV output, without using ``csv.writer``, so the log can be used
for lots of things.
- ... eh? What was I talking about?
- POOR; DEPRECATED.
Args:
filehandle: file to write to
fields: field names
values: values
"""
output_csv(filehandle, fields)
for row in values:
output_csv(filehandle, row) | 0.001931 |
def straight_line(title, length=100, linestyle="=", pad=0):
"""Return a fixed-length straight line with some text at the center.
Usage Example::
>>> StringTemplate.straight_line("Hello world!", 20, "-", 1)
--- Hello world! ---
"""
text = "{:%s^%s}" % (linestyle, length)
return text.format("%s%s%s" % (" "*pad, title, " "*pad)) | 0.009685 |
def delete_relay(self, relayid, data):
"""Delete relay settings"""
return self.api_call(
ENDPOINTS['relays']['delete'],
dict(relayid=relayid),
body=data) | 0.009756 |
def get_configured_hdfs_client():
"""
This is a helper that fetches the configuration value for 'client' in
the [hdfs] section. It will return the client that retains backwards
compatibility when 'client' isn't configured.
"""
config = hdfs()
custom = config.client
    conf_using_snakebite = [
        "snakebite_with_hadoopcli_fallback",
        "snakebite",
    ]
    if six.PY3 and (custom in conf_using_snakebite):
        warnings.warn(
            "snakebite client not compatible with python3 at the moment, "
            "falling back on hadoopcli",
stacklevel=2
)
return "hadoopcli"
return custom | 0.001506 |
def remove_droplets(self, droplet_ids):
"""
Unassign a LoadBalancer.
Args:
droplet_ids (obj:`list` of `int`): A list of Droplet IDs
"""
return self.get_data(
"load_balancers/%s/droplets/" % self.id,
type=DELETE,
params={"droplet_ids": droplet_ids}
) | 0.00578 |
def _setup_cgroup_memory_limit(self, memlimit, cgroups, pid_to_kill):
"""Start memory-limit handler.
@return None or the memory-limit handler for calling cancel()
"""
if memlimit is not None:
try:
oomThread = oomhandler.KillProcessOnOomThread(
cgroups=cgroups, pid_to_kill=pid_to_kill,
callbackFn=self._set_termination_reason,
kill_process_fn=self._kill_process)
oomThread.start()
return oomThread
except OSError as e:
logging.critical("OSError %s during setup of OomEventListenerThread: %s.",
e.errno, e.strerror)
return None | 0.004016 |
def api_key(value=None):
"""Set or get the API key.
Also set via environment variable GRAPHISTRY_API_KEY."""
if value is None:
return PyGraphistry._config['api_key']
# setter
if value is not PyGraphistry._config['api_key']:
PyGraphistry._config['api_key'] = value.strip()
PyGraphistry._is_authenticated = False | 0.005155 |
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region) | 0.003448 |
def mask_brain(volume,
template_name=None,
mask_threshold=None,
mask_self=True,
):
""" Mask the simulated volume
This creates a mask specifying the approximate likelihood that a voxel is
part of the brain. All values are bounded to the range of 0 to 1. An
appropriate threshold to isolate brain voxels is >0.2. Critically,
the data that should be used to create a template shouldn't already be
    masked/skull stripped. If it is then it will give inaccurate estimates
of non-brain noise and corrupt estimations of SNR.
Parameters
----------
volume : multidimensional array
Either numpy array of a volume or a tuple describing the dimensions
of the mask to be created
template_name : str
What is the path to the template to be loaded? If empty then it
defaults to an MNI152 grey matter mask. This is ignored if mask_self
is True.
mask_threshold : float
What is the threshold (0 -> 1) for including a voxel in the mask? If
None then the program will try and identify the last wide peak in a
histogram of the template (assumed to be the brain voxels) and takes
the minima before that peak as the threshold. Won't work when the
data is not bimodal.
mask_self : bool or None
If set to true then it makes a mask from the volume supplied (by
averaging across time points and changing the range). If it is set
to false then it will use the template_name as an input.
Returns
----------
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
template : 3 dimensional array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
"""
# If the volume supplied is a 1d array then output a volume of the
# supplied dimensions
if len(volume.shape) == 1:
volume = np.ones(volume)
# Load in the mask
if mask_self is True:
mask_raw = volume
elif template_name is None:
mask_raw = np.load(resource_stream(__name__, "grey_matter_mask.npy"))
else:
mask_raw = np.load(template_name)
    # Make the masks 3d
if len(mask_raw.shape) == 3:
mask_raw = np.array(mask_raw)
elif len(mask_raw.shape) == 4 and mask_raw.shape[3] == 1:
mask_raw = np.array(mask_raw[:, :, :, 0])
else:
mask_raw = np.mean(mask_raw, 3)
    # Find the max value (so you can calculate these as proportions)
mask_max = mask_raw.max()
# Make sure the mask values range from 0 to 1 (make out of max of volume
# so that this is invertible later)
mask_raw = mask_raw / mask_max
# If there is only one brain volume then make this a forth dimension
if len(volume.shape) == 3:
temp = np.zeros([volume.shape[0], volume.shape[1], volume.shape[2], 1])
temp[:, :, :, 0] = volume
volume = temp
# Reshape the mask to be the size as the brain
brain_dim = volume.shape
mask_dim = mask_raw.shape
zoom_factor = (brain_dim[0] / mask_dim[0],
brain_dim[1] / mask_dim[1],
brain_dim[2] / mask_dim[2],
)
# Scale the mask according to the input brain
# You might get a warning if the zoom_factor is not an integer but you
# can safely ignore that.
template = ndimage.zoom(mask_raw, zoom_factor, order=2)
template[template < 0] = 0
# If the mask threshold is not supplied then guess it is a minima
# between the two peaks of the bimodal distribution of voxel activity
if mask_threshold is None:
# How many bins on either side of a peak will be compared
order = 5
# Make the histogram
template_vector = template.reshape(brain_dim[0] * brain_dim[1] *
brain_dim[2])
template_hist = np.histogram(template_vector, 100)
# Zero pad the values
binval = np.concatenate([np.zeros((order,)), template_hist[0]])
bins = np.concatenate([np.zeros((order,)), template_hist[1]])
# Identify the first two peaks
peaks = signal.argrelmax(binval, order=order)[0][0:2]
# What is the minima between peaks
minima = binval[peaks[0]:peaks[1]].min()
# What is the index of the last idx with this min value (since if
# zero, there may be many)
minima_idx = (np.where(binval[peaks[0]:peaks[1]] == minima) + peaks[
0])[-1]
# Convert the minima into a threshold
mask_threshold = bins[minima_idx][0]
# Mask the template based on the threshold
mask = np.zeros(template.shape)
mask[template > mask_threshold] = 1
return mask, template | 0.000204 |
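A hypothetical call on simulated data; the random volume and the explicit threshold are assumptions (the automatic threshold search needs a bimodal intensity histogram, which pure noise does not have):
import numpy as np
volume = np.random.rand(16, 16, 16, 10)                 # x, y, z, time
mask, template = mask_brain(volume, mask_self=True,
                            mask_threshold=0.5)          # skip the histogram-based search
print(mask.shape, mask.sum())                            # binary mask and number of voxels kept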
def set_data(self, data):
"""Set data."""
if data != self.editor.model.get_data():
self.editor.set_data(data)
self.editor.adjust_columns() | 0.010989 |
def calculate(seqnon, mapcol, nmask, tests):
""" groups together several numba compiled funcs """
## create empty matrices
#LOGGER.info("tests[0] %s", tests[0])
#LOGGER.info('seqnon[[tests[0]]] %s', seqnon[[tests[0]]])
mats = chunk_to_matrices(seqnon, mapcol, nmask)
## empty svdscores for each arrangement of seqchunk
svds = np.zeros((3, 16), dtype=np.float64)
qscores = np.zeros(3, dtype=np.float64)
ranks = np.zeros(3, dtype=np.float64)
for test in range(3):
## get svd scores
svds[test] = np.linalg.svd(mats[test].astype(np.float64))[1]
ranks[test] = np.linalg.matrix_rank(mats[test].astype(np.float64))
## get minrank, or 11
minrank = int(min(11, ranks.min()))
for test in range(3):
qscores[test] = np.sqrt(np.sum(svds[test, minrank:]**2))
## sort to find the best qorder
best = np.where(qscores == qscores.min())[0]
#best = qscores[qscores == qscores.min()][0]
bidx = tests[best][0]
qsnps = count_snps(mats[best][0])
return bidx, qsnps | 0.008531 |
def register(id, url=None):
"""Register a UUID key in the global S3 bucket."""
bucket = registration_s3_bucket()
key = registration_key(id)
obj = bucket.Object(key)
obj.put(Body=url or "missing")
return _generate_s3_url(bucket, key) | 0.003906 |
def convert_multiPlanesRupture(self, node):
"""
Convert a multiPlanesRupture node.
:param node: the rupture node
"""
mag, rake, hypocenter = self.get_mag_rake_hypo(node)
with context(self.fname, node):
surfaces = list(node.getnodes('planarSurface'))
rupt = source.rupture.BaseRupture(
mag=mag, rake=rake,
tectonic_region_type=None,
hypocenter=hypocenter,
surface=self.convert_surfaces(surfaces))
return rupt | 0.003759 |
def logout(self, request):
"Logs out user and redirects them to Nexus home"
from django.contrib.auth import logout
logout(request)
return HttpResponseRedirect(reverse('nexus:index', current_app=self.name)) | 0.012552 |
def send_event(self, name, *args, **kwargs):
""" Send an event to the native handler. This call is queued and
batched.
Parameters
----------
name : str
The event name to be processed by MainActivity.processMessages.
*args: args
The arguments required by the event.
**kwargs: kwargs
Options for sending. These are:
now: boolean
Send the event now
"""
n = len(self._bridge_queue)
# Add to queue
self._bridge_queue.append((name, args))
if n == 0:
# First event, send at next available time
self._bridge_last_scheduled = time()
self.deferred_call(self._bridge_send)
return
elif kwargs.get('now'):
self._bridge_send(now=True)
return
# If it's been over 5 ms since we last scheduled, run now
dt = time() - self._bridge_last_scheduled
if dt > self._bridge_max_delay:
self._bridge_send(now=True) | 0.002804 |
def print_variables_info(self, output_file=sys.stdout):
        """Print variables information in human readable format."""
table = (' name | type size \n' +
'---------+-------------------------\n')
for name, var_info in list(self.variables.items()):
table += '{:>8} | {:>6} {!s:<10}\n'.format(name, var_info[0], var_info[1])
print(prefix_indent('variables: ', table), file=output_file) | 0.006593 |
def write_metadata(self, fp):
"""Adds data to the metadata that's written.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
"""
super(BaseDataModel, self).write_metadata(fp)
fp.write_stilde(self.data) | 0.006154 |
def unpack(self, source: IO):
"""
        Read the Field from the file-like object `source`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when loading a ClassFile.
:param source: Any file-like object providing `read()`
"""
self.access_flags.unpack(source.read(2))
self._name_index, self._descriptor_index = unpack('>HH', source.read(4))
self.attributes.unpack(source) | 0.005859 |
def map_sections(fun, neurites, neurite_type=NeuriteType.all, iterator_type=Tree.ipreorder):
'''Map `fun` to all the sections in a collection of neurites'''
return map(fun, iter_sections(neurites,
iterator_type=iterator_type,
neurite_filter=is_type(neurite_type))) | 0.005882 |
def show(self):
"""Show the circuit expression in an IPython notebook."""
# noinspection PyPackageRequirements
from IPython.display import Image, display
fname = self.render()
display(Image(filename=fname)) | 0.008065 |
def start(self):
"""Start the consumer. This starts a listen loop on a zmq.PULL socket,
calling ``self.handle`` on each incoming request and pushing the response
on a zmq.PUSH socket back to the producer."""
if not self.initialized:
raise Exception("Consumer not initialized (no Producer).")
producer = self.producer
context = zmq._Context()
self.pull = context.socket(zmq.PULL)
self.push = context.socket(zmq.PUSH)
self.pull.connect('tcp://%s:%s' % (producer.host, producer.push_port))
self.push.connect('tcp://%s:%s' % (producer.host, producer.pull_port))
# TODO: notify the producer that this consumer's ready for work?
self.listen() | 0.004027 |
def _write_file_network(data, filename):
'''
Writes a file to disk
'''
with salt.utils.files.fopen(filename, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(data)) | 0.005208 |
def get_manager(self, osid=None, impl_class_name=None, version=None):
"""Finds, loads and instantiates providers of OSID managers.
Providers must conform to an OsidManager interface. The
interfaces are defined in the OSID enumeration. For all OSID
requests, an instance of ``OsidManager`` that implements the
``OsidManager`` interface is returned. In bindings where
permitted, this can be safely cast into the requested manager.
arg: osid (osid.OSID): represents the OSID
arg: impl_class_name (string): the name of the implementation
arg: version (osid.installation.Version): the minimum
required OSID specification version
return: (osid.OsidManager) - the manager of the service
raise: ConfigurationError - an error in configuring the
implementation
raise: NotFound - the implementation class was not found
raise: NullArgument - ``impl_class_name`` or ``version`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``impl_class_name`` does not support the
requested OSID
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: After finding and instantiating the
requested ``OsidManager,`` providers must invoke
``OsidManager.initialize(OsidRuntimeManager)`` where the
environment is an instance of the current environment that
includes the configuration for the service being initialized.
The ``OsidRuntimeManager`` passed may include information useful
for the configuration such as the identity of the service being
instantiated.
"""
        # This implementation assumes that all osid impls reside as separate
# packages in the dlkit library, so that for instance the proxy manager for an
# OSID = 'osidpackage' in an implementation named 'impl_name' manager can
# be found in the python path for the module: dlkit.impl_name.osid.managers
# Also this implementation currently ignores the OSID specification version.
from importlib import import_module
try:
manager_module = import_module('dlkit.' + impl_class_name + '.' + osid.lower() + '.managers')
except ImportError:
raise NotFound()
try:
manager = getattr(manager_module, osid.title() + 'Manager')
except AttributeError:
raise Unsupported()
return manager | 0.002707 |
def get_web_element(self, element):
"""Return the web element from a page element or its locator
:param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
:returns: WebElement object
"""
from toolium.pageelements.page_element import PageElement
if isinstance(element, WebElement):
web_element = element
elif isinstance(element, PageElement):
web_element = element.web_element
elif isinstance(element, tuple):
web_element = self.driver_wrapper.driver.find_element(*element)
else:
web_element = None
return web_element | 0.00431 |
def _doClobber(self):
"""Remove the work directory"""
rc = yield self.runRmdir(self.workdir, timeout=self.timeout)
if rc != RC_SUCCESS:
raise RuntimeError("Failed to delete directory")
return rc | 0.008403 |
async def addreaction(self, ctx, *, reactor=""):
"""Interactively adds a custom reaction"""
if not reactor:
await self.bot.say("What should I react to?")
response = await self.bot.wait_for_message(author=ctx.message.author)
reactor = response.content
data = self.config.get(ctx.message.server.id, {})
keyword = data.get(reactor, {})
if keyword:
await self.bot.responses.failure(message="Reaction '{}' already exists.".format(reactor))
return
await self.bot.say("Okay, I'll react to '{}'. What do you want me to say? (Type $none for no response)".format(reactor))
response = await self.bot.wait_for_message(author=ctx.message.author)
reactions = []
def check(reaction, user):
if str(reaction.emoji) != "\U000023f9":
reactions.append(reaction.emoji)
return False
else:
return user == ctx.message.author
msg = await self.bot.say("Awesome! Now react to this message any reactions I should have to '{}'. (React \U000023f9 to stop)".format(reactor))
await self.bot.wait_for_reaction(message=msg, check=check)
for i, reaction in enumerate(reactions):
reaction = reaction if isinstance(reaction, str) else reaction.name + ":" + str(reaction.id)
await self.bot.add_reaction(ctx.message, reaction)
reactions[i] = reaction
if response:
keyword["response"] = response.content if response.content.lower() != "$none" else ""
keyword["reaction"] = reactions
data[reactor] = keyword
await self.config.put(ctx.message.server.id, data)
await self.bot.responses.success(message="Reaction '{}' has been added.".format(reactor)) | 0.005461 |
def get_default_recipients(self):
''' Overrides EmailRecipientMixin '''
return [x.registration.customer.email for x in self.eventregistration_set.filter(cancelled=False)] | 0.016129 |
def wait_for_a_future(futures, print_traceback=False):
"""
Return the next future that completes. If a KeyboardInterrupt is
received, then the entire process is exited immediately. See
wait_for_all_futures for more notes.
"""
while True:
try:
future = next(concurrent.futures.as_completed(futures, timeout=THREAD_TIMEOUT_MAX))
break
except concurrent.futures.TimeoutError:
pass
except KeyboardInterrupt:
if print_traceback:
traceback.print_stack()
else:
print('')
os._exit(os.EX_IOERR)
return future | 0.00304 |
def build_image_path(self, src):
"""\
This method will take an image path and build
out the absolute path to that image
* using the initial url we crawled
so we can find a link to the image
if they use relative urls like ../myimage.jpg
"""
o = urlparse(src)
# we have a full url
if o.netloc != '':
return o.geturl()
# we have a relative url
return urljoin(self.article.final_url, src) | 0.004032 |
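The relative-path handling comes straight from urljoin; for example:
>>> from urlparse import urljoin            # urllib.parse.urljoin on Python 3
>>> urljoin('http://example.com/articles/story.html', '../img/pic.jpg')
'http://example.com/img/pic.jpg'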
def get_list(self, terms, limit=0, sort=False, ranks=None):
"""
Get the specified cards from the stack.
:arg term:
The search term. Can be a card full name, value, suit,
abbreviation, or stack indice.
:arg int limit:
The number of items to retrieve for each term.
:arg bool sort:
Whether or not to sort the results, by poker ranks.
:arg dict ranks:
The rank dict to reference for sorting. If ``None``, it will
default to ``DEFAULT_RANKS``.
:returns:
A list of the specified cards, if found.
"""
ranks = ranks or self.ranks
got_cards = []
try:
indices = self.find_list(terms, limit=limit)
got_cards = [self.cards[i] for i in indices if self.cards[i]
not in got_cards]
self.cards = [v for i, v in enumerate(self.cards) if
i not in indices]
except:
indices = []
for item in terms:
try:
card = self.cards[item]
if card not in got_cards:
got_cards.append(card)
indices.append(item)
except:
indices += self.find(item, limit=limit)
got_cards += [self.cards[i] for i in indices if
self.cards[i] not in got_cards]
self.cards = [v for i, v in enumerate(self.cards) if
i not in indices]
if sort:
got_cards = sort_cards(got_cards, ranks)
return got_cards | 0.004819 |
def send_response(self, transaction):
"""
Handles the Blocks option in a outgoing response.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
if (key_token in self._block2_receive and transaction.response.payload is not None) or \
(transaction.response.payload is not None and len(transaction.response.payload) > defines.MAX_PAYLOAD):
if key_token in self._block2_receive:
byte = self._block2_receive[key_token].byte
size = self._block2_receive[key_token].size
num = self._block2_receive[key_token].num
else:
byte = 0
num = 0
size = defines.MAX_PAYLOAD
m = 1
self._block2_receive[key_token] = BlockItem(byte, num, m, size)
if len(transaction.response.payload) > (byte + size):
m = 1
else:
m = 0
transaction.response.payload = transaction.response.payload[byte:byte + size]
del transaction.response.block2
transaction.response.block2 = (num, m, size)
self._block2_receive[key_token].byte += size
self._block2_receive[key_token].num += 1
if m == 0:
del self._block2_receive[key_token]
return transaction | 0.003708 |
async def change_url(self, url: str, description: str = None):
""" change the url of that attachment
|methcoro|
Args:
url: url you want to change
description: *optional* description for your attachment
Raises:
ValueError: url must not be None
APIException
"""
await self._change(url=url, description=description) | 0.004854 |
def _disconnected(self, uri):
"""Disconnected callback from Crazyflie API"""
self.param_updater.close()
self.is_updated = False
# Clear all values from the previous Crazyflie
self.toc = Toc()
self.values = {} | 0.007813 |
def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False):
"""
Register a signal handler for `signal.SIGHUP` that checks for modified
files and only acts if at least one modified file is found.
@type signal_or_callable: str, int or callable
@param signal_or_callable: You can pass either a signal or a callable.
The signal can be specified by name or number. If specifying by name,
the 'SIG' portion is optional. For example, valid values for SIGINT
include 'INT', 'SIGINT' and `signal.SIGINT`.
Alternatively, you can pass a callable that will be called with the list
of changed files. So the call signature should be `func(list)`. The return
value of the callable is ignored.
@type verbose: bool or callable
@param verbose: Defaults to False. True indicates that a message should be
printed. You can also pass a callable such as log.info.
"""
#noinspection PyUnusedLocal
def handle_hup(signum, frame):
changed = modified()
if changed:
if callable(signal_or_callable):
func = signal_or_callable
args = (changed,)
op = 'Calling'
try:
name = signal_or_callable.__name__
except Exception:
name = str(signal_or_callable)
else:
if isinstance(signal_or_callable, int):
name = str(signal_or_callable)
signum = signal_or_callable
if verbose:
for item in dir(signal):
if item.startswith('SIG') and getattr(signal, item) == signal_or_callable:
name = item
break
else:
name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable
signum = getattr(signal, name)
func = os.kill
args = (os.getpid(), signum)
op = 'Sending'
if verbose:
more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else ''
message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more)
if callable(verbose):
#noinspection PyCallingNonCallable
verbose(message)
else:
print(message)
func(*args)
files()
signal.signal(signal.SIGHUP, handle_hup)
signal.siginterrupt(signal.SIGHUP, False) | 0.003418 |
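Hypothetical registrations illustrating the argument forms described in the docstring; reload_config is a made-up callback:
def reload_config(changed_files):        # hypothetical callback
    print('reloading because of', changed_files)

hup_hook(reload_config, verbose=True)    # call reload_config(changed) when watched files change
# hup_hook('INT', verbose=True)          # ...or just forward a signal by name instead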
def read_reference_resource_list(self, ref_sitemap, name='reference'):
"""Read reference resource list and return the ResourceList object.
The name parameter is used just in output messages to say what type
of resource list is being read.
"""
rl = ResourceList()
self.logger.info(
"Reading %s resource list from %s ..." %
(name, ref_sitemap))
rl.mapper = self.mapper
rl.read(uri=ref_sitemap, index_only=(not self.allow_multifile))
num_entries = len(rl.resources)
self.logger.info(
"Read %s resource list with %d entries in %d sitemaps" %
(name, num_entries, rl.num_files))
if (self.verbose):
to_show = 100
override_str = ' (override with --max-sitemap-entries)'
if (self.max_sitemap_entries):
to_show = self.max_sitemap_entries
override_str = ''
if (num_entries > to_show):
print(
"Showing first %d entries sorted by URI%s..." %
(to_show, override_str))
n = 0
for r in rl.resources:
print(r)
n += 1
if (n >= to_show):
break
return(rl) | 0.00153 |
def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False):
"""
Multi-output GP with fully correlated inducing variables.
The inducing variables are shaped in the same way as evaluations of K, to allow a default
inducing point scheme for multi-output kernels.
The covariance matrices used to calculate the conditional have the following shape:
- Kuu: M x L x M x L
- Kuf: M x L x N x P
- Kff: N x P x N x P, N x P x P, N x P
Further reference
-----------------
- See `gpflow.conditionals._conditional` for a detailed explanation of
conditional in the single-output case.
    - See the multioutput notebook for more information about the multioutput framework.
Parameters
----------
:param f: variational mean, ML x 1
:param q_sqrt: standard-deviations or cholesky, ML x 1 or 1 x ML x ML
"""
logger.debug("Conditional: InducingPoints -- Mok")
Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level) # M x L x M x L
Kmn = Kuf(feat, kern, Xnew) # M x L x N x P
Knn = kern.K(Xnew, full_output_cov=full_output_cov) if full_cov \
else kern.Kdiag(Xnew, full_output_cov=full_output_cov) # N x P(x N)x P or N x P(x P)
M, L, N, K = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)]
Kmm = tf.reshape(Kmm, (M * L, M * L))
if full_cov == full_output_cov:
Kmn = tf.reshape(Kmn, (M * L, N * K))
Knn = tf.reshape(Knn, (N * K, N * K)) if full_cov else tf.reshape(Knn, (N * K,))
fmean, fvar = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) # NK x 1, 1 x NK(x NK)
fmean = tf.reshape(fmean, (N, K))
fvar = tf.reshape(fvar, (N, K, N, K) if full_cov else (N, K))
else:
Kmn = tf.reshape(Kmn, (M * L, N, K))
fmean, fvar = fully_correlated_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov,
full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=white)
return fmean, fvar | 0.005332 |
def dense_dropconnect(inputs,
output_size,
dropconnect_dropout=0.0,
name="dense_dropconnect",
**kwargs):
"""Dense layer with dropconnect."""
if dropconnect_dropout != 0.0:
tf.logging.info("Applying dropconnect as the kernel regularization.")
kwargs["kernel_regularizer"] = functools.partial(
tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout)
return dense(inputs, output_size, use_bias=True, name=name, **kwargs) | 0.007619 |
def duplicate(cls, other):
"""Create a copy of the layer."""
return cls(other.soil_type, other.thickness, other.shear_vel) | 0.014493 |
def get_level_fmt(self, level):
"""Get format for log level."""
key = None
if level == logging.DEBUG:
key = 'debug'
elif level == logging.INFO:
key = 'info'
elif level == logging.WARNING:
key = 'warning'
elif level == logging.ERROR:
key = 'error'
elif level == logging.CRITICAL:
key = 'critical'
return self.overwrites.get(key, self.fmt) | 0.004338 |
def create_sequences(colors, vte_fix=False):
"""Create the escape sequences."""
alpha = colors["alpha"]
# Colors 0-15.
sequences = [set_color(index, colors["colors"]["color%s" % index])
for index in range(16)]
# Special colors.
# Source: https://goo.gl/KcoQgP
    # 10 = foreground, 11 = background, 12 = cursor foreground
# 13 = mouse foreground, 708 = background border color.
sequences.extend([
set_special(10, colors["special"]["foreground"], "g"),
set_special(11, colors["special"]["background"], "h", alpha),
set_special(12, colors["special"]["cursor"], "l"),
set_special(13, colors["special"]["foreground"], "j"),
set_special(17, colors["special"]["foreground"], "k"),
set_special(19, colors["special"]["background"], "m"),
set_color(232, colors["special"]["background"]),
set_color(256, colors["special"]["foreground"])
])
if not vte_fix:
sequences.extend(
set_special(708, colors["special"]["background"], "", alpha)
)
if OS == "Darwin":
sequences += set_iterm_tab_color(colors["special"]["background"])
return "".join(sequences) | 0.000829 |
def calculate_size(name, expected, updated):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
data_size += LONG_SIZE_IN_BYTES
return data_size | 0.004098 |
def mass_inform(self, msg):
"""Send an inform message to all clients.
Parameters
----------
msg : Message object
The inform message to send.
"""
assert (msg.mtype == Message.INFORM)
self._server.mass_send_message_from_thread(msg) | 0.006689 |
def download(self, chunk_size=1024):
"""Download attachment
Args:
chunk_size (int): Byte-size of chunked download request stream
Returns:
BytesIO: Stream ready for reading containing the attachment file contents
"""
stream = BytesIO()
response = self._swimlane.request(
'get',
'attachment/download/{}'.format(self.file_id),
stream=True
)
for chunk in response.iter_content(chunk_size):
stream.write(chunk)
stream.seek(0)
return stream | 0.005068 |
def catch_exceptions(*exceptions):
"""Catch all exceptions provided as arguments, and raise CloudApiException instead."""
def wrap(fn):
@functools.wraps(fn)
def wrapped_f(*args, **kwargs):
try:
return fn(*args, **kwargs)
except exceptions:
t, value, traceback = sys.exc_info()
# If any resource does not exist, return None instead of raising
if str(value.status) == '404':
return None
e = CloudApiException(str(value), value.reason, value.status)
raise_(CloudApiException, e, traceback)
return wrapped_f
return wrap | 0.004342 |
def from_json(self, data, deref=False):
        """Decode the string from JSON to return the original object
        (dereferencing records if `deref` is true). Uses the `json.loads`
        function with `self.decode` as object_hook.
:param data:
JSON encoded string.
:type data: str
:param deref:
Whether to decode records that gave `ref=True` at encoding.
:type deref: bool"""
return self.deep_decode(json.loads(data), deref) | 0.004246 |
def fraction_correct_fuzzy_linear_create_vector(z, z_cutoff, z_fuzzy_range):
'''A helper function for fraction_correct_fuzzy_linear.'''
assert(z_fuzzy_range * 2 < z_cutoff)
if (z == None or numpy.isnan(z)): # todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values
return None
elif (z >= z_cutoff + z_fuzzy_range): # positive e.g. z >= 1.1
return [0, 0, 1]
elif (z <= -z_cutoff - z_fuzzy_range): # negative e.g. z <= -1.1
return [1, 0, 0]
elif (-z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range): # neutral e.g. -0.9 <= z <= 0.9
return [0, 1, 0]
    elif (-z_cutoff - z_fuzzy_range < z < -z_cutoff + z_fuzzy_range): # negative/neutral e.g. -1.1 < z < -0.9
neutrality = (z + z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [1 - neutrality, neutrality, 0]
elif (z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range): # neutral/positive e.g. 0.9 < z < 1.1
positivity = (z - z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [0, 1 - positivity, positivity]
else:
raise Exception('Logical error.')
# normalize the vector
length = math.sqrt(numpy.dot(zvec, zvec))
return numpy.divide(zvec, length) | 0.008296 |
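A worked example of the fuzzy branch, with z_cutoff = 1.0 and z_fuzzy_range = 0.1:
# z = 1.05 lies in the neutral/positive band (0.9 < z < 1.1):
#   positivity = (1.05 - 1.0 + 0.1) / 0.2 = 0.75  ->  zvec = [0, 0.25, 0.75]
#   normalised by sqrt(0.25**2 + 0.75**2) ~= 0.7906  ->  [0, 0.316, 0.949]
print(fraction_correct_fuzzy_linear_create_vector(1.05, 1.0, 0.1))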
def is_new_namespace_preorder( self, namespace_id_hash, lastblock=None ):
"""
    Given a namespace preorder hash, determine whether or not it is new (i.e. unseen before).
"""
if lastblock is None:
lastblock = self.lastblock
preorder = namedb_get_namespace_preorder( self.db, namespace_id_hash, lastblock )
if preorder is not None:
return False
else:
return True | 0.022624 |
def remove_page(self, route):
"""Remove a proxied page from the Web UI.
Parameters
----------
route : str
The route for the proxied page. Must be a valid path *segment* in a
url (e.g. ``foo`` in ``/foo/bar/baz``). Routes must be unique
across the application.
"""
req = proto.RemoveProxyRequest(route=route)
self._client._call('RemoveProxy', req) | 0.004556 |
def get_cached_moderated_reddits(self):
"""Return a cached dictionary of the user's moderated reddits.
This list is used internally. Consider using the `get_my_moderation`
function instead.
"""
if self._mod_subs is None:
self._mod_subs = {'mod': self.reddit_session.get_subreddit('mod')}
for sub in self.reddit_session.get_my_moderation(limit=None):
self._mod_subs[six.text_type(sub).lower()] = sub
return self._mod_subs | 0.003922 |
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
result = None
cont = self.input.payload
serialization.write_all(
str(self.resolve_option("output")),
[cont.get("Model").jobject, cont.get("Header").jobject])
return result | 0.005025 |
def _transform_list_args(self, args):
# type: (dict) -> None
"""Transforms all list arguments from json-server to model-resource ones.
This modifies the given arguments.
"""
if '_limit' in args:
args['limit'] = int(args['_limit'])
del args['_limit']
if '_page' in args:
page = int(args['_page'])
if page < 0:
page = 1
args['page'] = page
del args['_page']
if 'limit' not in args:
args['limit'] = 10
if '_end' in args:
end = int(args['_end'])
args['limit'] = end - int(args.get('_start', 0))
if '_start' in args:
args['offset'] = args['_start']
del args['_start']
if '_sort' in args:
args['order_by'] = args['_sort'].replace('__', '.')
del args['_sort']
if args.get('_order', 'ASC') == 'DESC':
args['order_by'] = '-' + args['order_by']
if '_order' in args:
del args['_order']
filter_by = self._create_filter_by()
if filter_by:
args['filter_by'] = filter_by | 0.003328 |
def to_dict(self, remove_nones=False):
"""Return the dict representation of the instance.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representation of the `DidlObject`.
"""
content = {}
# Get the value of each attribute listed in _translation, and add it
# to the content dict
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
# also add parent_id, item_id, restricted, title and resources because
# they are not listed in _translation
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = [resource.to_dict(remove_nones=remove_nones)
for resource in self.resources]
content['desc'] = self.desc
return content | 0.001791 |
def load_file(self, filename):
"""Load config from a YAML file."""
filename = os.path.abspath(filename)
with open(filename) as f:
self.load_dict(yaml.load(f))
self._loaded_files.append(filename) | 0.008333 |
def query_phenomizer(usr, pwd, *hpo_terms):
"""
Query the phenomizer web tool
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_terms (list): A list with hpo terms
Returns:
raw_answer : The raw result from phenomizer
"""
base_string = 'http://compbio.charite.de/phenomizer/phenomizer/PhenomizerServiceURI'
questions = {'mobilequery':'true', 'terms':','.join(hpo_terms), 'username':usr, 'password':pwd}
try:
r = requests.get(base_string, params=questions, timeout=10)
except requests.exceptions.Timeout:
raise RuntimeError("The request timed out.")
if not r.status_code == requests.codes.ok:
raise RuntimeError("Phenomizer returned a bad status code: %s" % r.status_code)
r.encoding = 'utf-8'
return r | 0.014891 |
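Hypothetical usage with placeholder credentials and two HPO terms; the return value is the raw requests.Response:
result = query_phenomizer('user', 'secret', 'HP:0001250', 'HP:0001263')
print(result.text.splitlines()[:3])   # first rows of the raw phenomizer answer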
def getConnectorStats(self):
"""Return dictionary of Connector Stats for Apache Tomcat Server.
@return: Nested dictionary of Connector Stats.
"""
if self._statusxml is None:
self.initStats()
connnodes = self._statusxml.findall('connector')
connstats = {}
if connnodes:
for connnode in connnodes:
namestr = connnode.get('name')
if namestr is not None:
mobj = re.match('(.*)-(\d+)', namestr)
if mobj:
proto = mobj.group(1)
port = int(mobj.group(2))
connstats[port] = {'proto': proto}
for tag in ('threadInfo', 'requestInfo'):
stats = {}
node = connnode.find(tag)
if node is not None:
for (key,val) in node.items():
if re.search('Time$', key):
stats[key] = float(val) / 1000.0
else:
stats[key] = util.parse_value(val)
if stats:
connstats[port][tag] = stats
return connstats | 0.004373 |
def list_all_files(i):
"""
Input: {
path - top level path
(file_name) - search for a specific file name
(pattern) - return only files with this pattern
(path_ext) - path extension (needed for recursion)
(limit) - limit number of files (if directories with a large number of files)
(number) - current number of files
(all) - if 'yes' do not ignore special directories (like .cm)
(ignore_names) - list of names to ignore
(ignore_symb_dirs) - if 'yes', ignore symbolically linked dirs
(to avoid recursion such as in LLVM)
(add_path) - if 'yes', add path
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
list - dictionary of all files:
{"file_with_full_path":{"size":.., "path":..}
sizes - sizes of files (the same order)
number - number of files in a current directory (needed for recursion)
}
"""
number=0
if i.get('number','')!='':
number=int(i['number'])
inames=i.get('ignore_names',[])
fname=i.get('file_name','')
limit=-1
if i.get('limit','')!='':
limit=int(i['limit'])
a={}
iall=i.get('all','')
pe=''
if i.get('path_ext','')!='':
pe=i['path_ext']
po=i.get('path','')
if sys.version_info[0]<3: po=unicode(po)
pattern=i.get('pattern','')
if pattern!='':
import fnmatch
xisd=i.get('ignore_symb_dirs','')
isd=False
if xisd=='yes': isd=True
ap=i.get('add_path','')
try:
dirList=os.listdir(po)
except Exception as e:
None
else:
for fn in dirList:
p=os.path.join(po, fn)
if iall=='yes' or fn not in cfg['special_directories']:
if len(inames)==0 or fn not in inames:
if os.path.isdir(p):
if not isd or os.path.realpath(p)==p:
r=list_all_files({'path':p, 'all':iall, 'path_ext':os.path.join(pe, fn),
'number':str(number), 'ignore_names':inames, 'pattern':pattern,
'file_name':fname, 'ignore_symb_dirs':xisd, 'add_path':ap, 'limit': limit})
if r['return']>0: return r
a.update(r['list'])
else:
add=True
if fname!='' and fname!=fn:
add=False
if pattern!='' and not fnmatch.fnmatch(fn, pattern):
add=False
if add:
pg=os.path.join(pe, fn)
if os.path.isfile(p):
a[pg]={'size':os.stat(p).st_size}
if ap=='yes': a[pg]['path']=po
number=len(a)
if limit!=-1 and number>=limit:
break
return {'return':0, 'list':a, 'number':str(number)} | 0.029815 |
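A hedged usage sketch (passing all='yes' so the call does not touch the CK kernel's cfg['special_directories'] global):

r = list_all_files({'path': '.', 'pattern': '*.py', 'all': 'yes', 'add_path': 'yes', 'limit': '50'})
for rel_path, info in sorted(r['list'].items()):
    print(rel_path, info['size'], info.get('path', ''))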
def read_interactions(path, comments="#", directed=False, delimiter=None,
nodetype=None, timestamptype=None, encoding='utf-8', keys=False):
"""Read a DyNetx graph from interaction list format.
Parameters
----------
    path : file
        Open file handle to read the interaction list from; lines are
        decoded using ``encoding``
delimiter : character
Column delimiter
"""
ids = None
lines = (line.decode(encoding) for line in path)
if keys:
ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype)
return parse_interactions(lines, comments=comments, directed=directed, delimiter=delimiter, nodetype=nodetype,
timestamptype=timestamptype, keys=ids) | 0.00813 |
def get_signed_url(self, params):
'''Returns a Premier account signed url.'''
params['client'] = self.client_id
url_params = {'protocol': self.protocol, 'domain': self.domain,
'service': self.service, 'params': urlencode(params)}
secret = base64.urlsafe_b64decode(self.secret_key)
url_params['url_part'] = (
'/maps/api/%(service)s/json?%(params)s' % url_params)
signature = hmac.new(secret, url_params['url_part'], hashlib.sha1)
url_params['signature'] = base64.urlsafe_b64encode(signature.digest())
return ('%(protocol)s://%(domain)s%(url_part)s'
'&signature=%(signature)s' % url_params) | 0.004132 |
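A standalone Python 3 sketch of the same HMAC-SHA1 signing scheme (the client id and base64 key below are made-up placeholders, not real Maps credentials):

import base64
import hashlib
import hmac
from urllib.parse import urlencode

def sign_path(path_and_query, urlsafe_b64_secret):
    # The secret is the url-safe base64 string issued with the account.
    key = base64.urlsafe_b64decode(urlsafe_b64_secret)
    digest = hmac.new(key, path_and_query.encode('utf-8'), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).decode('utf-8')

path = '/maps/api/geocode/json?' + urlencode({'address': 'Berlin', 'client': 'gme-demo'})
print('https://maps.googleapis.com' + path + '&signature=' + sign_path(path, 'dGVzdF9rZXlfMTIzNDU2Nzg='))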
def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict
"""
for func in spec_dict["functions"]["signatures"]:
for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]):
args = sig["arguments"]
req_args = []
pos_args = []
opt_args = []
mult_args = []
for arg in args:
# Multiple argument types
if arg.get("multiple", False):
if arg["type"] in ["Function", "Modifier"]:
mult_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
# Complex signature has this
mult_args.append(arg["type"])
# Optional, position dependent - will be added after req_args based on order in bel_specification
elif arg.get("optional", False) and arg.get("position", False):
if arg["type"] in ["Function", "Modifier"]:
pos_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
pos_args.append(arg["type"])
# Optional, position independent
elif arg.get("optional", False):
if arg["type"] in ["Function", "Modifier"]:
opt_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
opt_args.append(arg["type"])
# Required arguments, position dependent
else:
if arg["type"] in ["Function", "Modifier"]:
req_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
req_args.append(arg["type"])
spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy(
req_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy(
pos_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy(
opt_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i][
"mult_args"
] = copy.deepcopy(mult_args)
return spec_dict | 0.002831 |
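An abridged, hypothetical before/after sketch of what the enhancement adds to a single signature entry (the argument list is not taken from a real BEL specification):

arguments = [
    {"type": "Function", "values": ["proteinAbundance", "geneAbundance"]},   # required, positional
    {"type": "NSArg", "optional": True},                                     # optional, position-independent
    {"type": "Modifier", "values": ["location"], "optional": True},
]
# Per the logic above, the signature would gain:
#   req_args  = [["proteinAbundance", "geneAbundance"]]
#   pos_args  = []
#   opt_args  = ["NSArg", "location"]
#   mult_args = []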
def Sample(self, task, status):
"""Takes a sample of the status of a task for profiling.
Args:
task (Task): a task.
status (str): status.
"""
sample_time = time.time()
sample = '{0:f}\t{1:s}\t{2:s}\n'.format(
sample_time, task.identifier, status)
self._WritesString(sample) | 0.003145 |
def smart_search_prefix(self, auth, query_str, search_options=None, extra_query=None):
""" Perform a smart search on prefix list.
* `auth` [BaseAuth]
AAA options.
* `query_str` [string]
Search string
* `search_options` [options_dict]
Search options. See :func:`search_prefix`.
* `extra_query` [dict_to_sql]
Extra search terms, will be AND:ed together with what is
extracted from the query string.
Return a dict with three elements:
* :attr:`interpretation` - How the query string was interpreted.
* :attr:`search_options` - Various search_options.
* :attr:`result` - The search result.
The :attr:`interpretation` is given as a list of dicts, each
explaining how a part of the search key was interpreted (ie. what
prefix attribute the search operation was performed on).
The :attr:`result` is a list of dicts containing the search result.
The smart search function tries to convert the query from a text
string to a `query` dict which is passed to the
:func:`search_prefix` function. If multiple search keys are
detected, they are combined with a logical AND.
It tries to automatically detect IP addresses and prefixes and put
these into the `query` dict with "contains_within" operators and so
forth.
See the :func:`search_prefix` function for an explanation of the
`search_options` argument.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_prefix` for full
understanding.
"""
if search_options is None:
search_options = {}
self._logger.debug("smart_search_prefix query string: %s" % query_str)
success, query = self._parse_prefix_query(query_str)
if not success:
return {
'interpretation': query,
'search_options': search_options,
'result': [],
'error': True,
'error_message': 'query interpretation failed'
}
if extra_query is not None:
query = {
'operator': 'and',
'val1': query,
'val2': extra_query
}
self._logger.debug("smart_search_prefix: query expanded to: %s" % unicode(query))
search_result = self.search_prefix(auth, query, search_options)
search_result['interpretation'] = query
search_result['error'] = False
return search_result | 0.002429 |
def _seconds_to_section_split(record, sections):
"""
Finds the seconds to the next section from the datetime of a record.
"""
next_section = sections[
bisect_right(sections, _find_weektime(record.datetime))] * 60
return next_section - _find_weektime(record.datetime, time_type='sec') | 0.003205 |
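A standalone sketch of the same bisect idea (the helper and argument names here are made up; sections are taken to be sorted minute offsets within the week):

from bisect import bisect_right

def seconds_to_next_section(week_minute, week_second, sections):
    next_section = sections[bisect_right(sections, week_minute)] * 60
    return next_section - week_second

# 7170 s into the week (minute 119): the boundary at minute 120 is 30 s away.
print(seconds_to_next_section(119, 7170, [0, 60, 120, 180]))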
def extract_jtl_string_pairs_from_text_file(results_dict, file_path):
""" Extracts all string pairs matching the JTL pattern from given text file.
This can be used as an "extract_func" argument in the extract_string_pairs_in_directory method.
Args:
        results_dict (dict): The dict to add the string pairs to.
file_path (str): The path of the file from which to extract the string pairs.
"""
    with open(file_path) as jtl_file:
        result_pairs = re.findall(JTL_REGEX, jtl_file.read())
for result_key, result_comment in result_pairs:
results_dict[result_key] = result_comment
return results_dict | 0.006472 |
def natsort_key(s, number_type=int, signed=False, exp=False):
"""\
Key to sort strings and numbers naturally, not lexicographically.
It also has basic support for version numbers.
For use in passing to the :py:func:`sorted` builtin or
:py:meth:`sort` attribute of lists.
Use natsort_key just like any other sorting key.
>>> a = ['num3', 'num5', 'num2']
>>> a.sort(key=natsort_key)
>>> a
['num2', 'num3', 'num5']
Below illustrates how the key works, and how the different options affect sorting.
>>> natsort_key('a-5.034e1')
('a-', 5, '.', 34, 'e', 1)
>>> natsort_key('a-5.034e1', number_type=float, signed=True, exp=True)
('a', -50.34)
>>> natsort_key('a-5.034e1', number_type=float, signed=True, exp=False)
('a', -5.034, 'e', 1.0)
>>> natsort_key('a-5.034e1', number_type=float, signed=False, exp=True)
('a-', 50.34)
>>> natsort_key('a-5.034e1', number_type=float, signed=False, exp=False)
('a-', 5.034, 'e', 1.0)
>>> natsort_key('a-5.034e1', number_type=int, signed=True)
('a', -5, '.', 34, 'e', 1)
>>> natsort_key('a-5.034e1', number_type=int, signed=False)
('a-', 5, '.', 34, 'e', 1)
>>> natsort_key('a-5.034e1', number_type=int, exp=False)
('a-', 5, '.', 34, 'e', 1)
>>> natsort_key('a-5.034e1', number_type=None)
('a-', 5, '.', 34, 'e', 1)
    This is a demonstration of how number_type=None works.
>>> natsort_key('a-5.034e1', number_type=None) == natsort_key('a-5.034e1', number_type=None, signed=False)
True
>>> natsort_key('a-5.034e1', number_type=None) == natsort_key('a-5.034e1', number_type=None, exp=False)
True
>>> natsort_key('a-5.034e1', number_type=None) == natsort_key('a-5.034e1', number_type=int, signed=False)
True
Iterables are parsed recursively so you can sort lists of lists.
>>> natsort_key(('a1', 'a10'))
(('a', 1), ('a', 10))
Strings that lead with a number get an empty string at the front of the tuple.
This is designed to get around the "unorderable types" issue.
>>> natsort_key(('15a', '6'))
(('', 15, 'a'), ('', 6))
You can give numbers, too.
>>> natsort_key(10)
('', 10)
"""
# If we are dealing with non-strings, return now
if not isinstance(s, six.string_types):
if hasattr(s, '__getitem__'):
return tuple(natsort_key(x) for x in s)
else:
return ('', s,)
# Convert to the proper tuple and return
inp_options = (number_type, signed, exp)
args = (s,) + regex_and_num_function_chooser[inp_options]
try:
return tuple(_number_finder(*args))
except KeyError:
# Report errors properly
        if number_type not in (float, int) and number_type is not None:
            raise ValueError("natsort_key: 'number_type' "
                             "parameter '{0}' invalid".format(str(number_type)))
        elif signed not in (True, False):
            raise ValueError("natsort_key: 'signed' "
                             "parameter '{0}' invalid".format(str(signed)))
        elif exp not in (True, False):
            raise ValueError("natsort_key: 'exp' "
                             "parameter '{0}' invalid".format(str(exp))) | 0.002365 |
def create_schema(host):
"""
Create exchanges, queues and route them.
Args:
host (str): One of the possible hosts.
"""
connection = create_blocking_connection(host)
channel = connection.channel()
exchange = settings.get_amqp_settings()[host]["exchange"]
channel.exchange_declare(
exchange=exchange,
exchange_type="topic",
durable=True
)
print "Created exchange '%s'." % exchange
print "Creating queues:"
queues = settings.get_amqp_settings()[host]["queues"]
for queue in queues.keys():
channel.queue_declare(
queue=queue,
durable=True,
# arguments={'x-message-ttl': int(1000 * 60 * 60 * 24)} # :S
)
print "\tCreated durable queue '%s'." % queue
print
print "Routing exchanges using routing key to queues:"
for queue in queues.keys():
channel.queue_bind(
queue=queue,
exchange=exchange,
routing_key=queues[queue]
)
print "\tRouting exchange %s['%s'] -> '%s'." % (
exchange,
queues[queue],
queue
) | 0.000861 |
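A sketch of the AMQP settings fragment this function expects (the host, exchange, and queue names are hypothetical):

# Hypothetical return value of settings.get_amqp_settings():
amqp_settings = {
    "ftp": {
        "exchange": "search",
        "queues": {
            "daemon-queue": "request.#",   # queue name -> routing key
            "result-queue": "result.#",
        },
    },
}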
def email(self, to, msg):
"""
Quickly send an email from a default address. Calls :py:meth:`gmail_email`.
* *stored credential name: GMAIL_EMAIL*
:param string to: The email address to send the email to.
:param msg: The content of the email. See :py:meth:`gmail_email`.
"""
logging.debug('Emailing someone')
return self.gmail_email(self._credentials['GMAIL_EMAIL'], to, msg) | 0.013129 |
def _compute_fixed_point_ig(T, v, max_iter, verbose, print_skip, is_approx_fp,
*args, **kwargs):
"""
Implement the imitation game algorithm by McLennan and Tourky (2006)
for computing an approximate fixed point of `T`.
Parameters
----------
is_approx_fp : callable
A callable with signature `is_approx_fp(v)` which determines
whether `v` is an approximate fixed point with a bool return
value (i.e., True or False)
For the other parameters, see Parameters in compute_fixed_point.
Returns
-------
x_new : scalar(float) or ndarray(float)
Approximate fixed point.
converged : bool
Whether the routine has converged.
iterate : scalar(int)
Number of iterations.
"""
if verbose == 2:
start_time = time.time()
_print_after_skip(print_skip, it=None)
x_new = v
y_new = T(x_new, *args, **kwargs)
iterate = 1
converged = is_approx_fp(x_new)
if converged or iterate >= max_iter:
if verbose == 2:
error = np.max(np.abs(y_new - x_new))
etime = time.time() - start_time
print_skip = 1
_print_after_skip(print_skip, iterate, error, etime)
if verbose >= 1:
if not converged:
warnings.warn(_non_convergence_msg, RuntimeWarning)
elif verbose == 2:
print(_convergence_msg.format(iterate=iterate))
return x_new, converged, iterate
if verbose == 2:
error = np.max(np.abs(y_new - x_new))
etime = time.time() - start_time
_print_after_skip(print_skip, iterate, error, etime)
# Length of the arrays to store the computed sequences of x and y.
# If exceeded, reset to min(max_iter, buff_size*2).
buff_size = 2**8
buff_size = min(max_iter, buff_size)
shape = (buff_size,) + np.asarray(x_new).shape
X, Y = np.empty(shape), np.empty(shape)
X[0], Y[0] = x_new, y_new
x_new = Y[0]
tableaux = tuple(np.empty((buff_size, buff_size*2+1)) for i in range(2))
bases = tuple(np.empty(buff_size, dtype=int) for i in range(2))
max_piv = 10**6 # Max number of pivoting steps in _lemke_howson_tbl
while True:
y_new = T(x_new, *args, **kwargs)
iterate += 1
converged = is_approx_fp(x_new)
if converged or iterate >= max_iter:
break
if verbose == 2:
error = np.max(np.abs(y_new - x_new))
etime = time.time() - start_time
_print_after_skip(print_skip, iterate, error, etime)
try:
X[iterate-1] = x_new
Y[iterate-1] = y_new
except IndexError:
buff_size = min(max_iter, buff_size*2)
shape = (buff_size,) + X.shape[1:]
X_tmp, Y_tmp = X, Y
X, Y = np.empty(shape), np.empty(shape)
X[:X_tmp.shape[0]], Y[:Y_tmp.shape[0]] = X_tmp, Y_tmp
X[iterate-1], Y[iterate-1] = x_new, y_new
tableaux = tuple(np.empty((buff_size, buff_size*2+1))
for i in range(2))
bases = tuple(np.empty(buff_size, dtype=int) for i in range(2))
m = iterate
tableaux_curr = tuple(tableau[:m, :2*m+1] for tableau in tableaux)
bases_curr = tuple(basis[:m] for basis in bases)
_initialize_tableaux_ig(X[:m], Y[:m], tableaux_curr, bases_curr)
converged, num_iter = _lemke_howson_tbl(
tableaux_curr, bases_curr, init_pivot=m-1, max_iter=max_piv
)
_, rho = _get_mixed_actions(tableaux_curr, bases_curr)
if Y.ndim <= 2:
x_new = rho.dot(Y[:m])
else:
shape_Y = Y.shape
Y_2d = Y.reshape(shape_Y[0], np.prod(shape_Y[1:]))
x_new = rho.dot(Y_2d[:m]).reshape(shape_Y[1:])
if verbose == 2:
error = np.max(np.abs(y_new - x_new))
etime = time.time() - start_time
print_skip = 1
_print_after_skip(print_skip, iterate, error, etime)
if verbose >= 1:
if not converged:
warnings.warn(_non_convergence_msg, RuntimeWarning)
elif verbose == 2:
print(_convergence_msg.format(iterate=iterate))
return x_new, converged, iterate | 0.000234 |
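A hedged sketch of the calling convention, using a simple scalar contraction (assumes the quantecon internals referenced above, such as _lemke_howson_tbl, are importable; the public compute_fixed_point wrapper would normally build is_approx_fp from a tolerance):

T = lambda x: 0.5 * x + 1.0                      # contraction with fixed point 2.0
is_close = lambda x: abs(T(x) - x) < 1e-8

x_star, converged, n_iter = _compute_fixed_point_ig(
    T, 0.0, max_iter=200, verbose=0, print_skip=5, is_approx_fp=is_close)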
def split_call(lines, open_paren_line=0):
"""Returns a 2-tuple where the first element is the list of lines from the
first open paren in lines to the matching closed paren. The second element
is all remaining lines in a list."""
num_open = 0
num_closed = 0
for i, line in enumerate(lines):
c = line.count('(')
num_open += c
        if not c and i == open_paren_line:
            raise Exception('Expected an open parenthesis in line %d but there is not one there: %s' % (i, str(lines)))
num_closed += line.count(')')
if num_open == num_closed:
return (lines[:i+1], lines[i+1:])
print(''.join(lines))
raise Exception('parenthesis are mismatched (%d open, %d closed found)' % (num_open, num_closed)) | 0.005181 |
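A small usage sketch:

lines = ['result = compute(\n', '    alpha,\n', '    beta)\n', 'print(result)\n']
call, rest = split_call(lines)
# call -> the three lines of the compute(...) invocation; rest -> ['print(result)\n']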
def local_bifurcation_angles(neurites, neurite_type=NeuriteType.all):
'''Get a list of local bifurcation angles in a collection of neurites'''
return map_sections(_bifurcationfunc.local_bifurcation_angle,
neurites,
neurite_type=neurite_type,
iterator_type=Tree.ibifurcation_point) | 0.002778 |
def setMaximum(self, maximum):
"""setter to _maximum.
Args:
maximum (int or long): new _maximum value
"""
if not isinstance(maximum, int):
raise TypeError("Argument is not of type int or long")
self._maximum = maximum | 0.007092 |
def empty(cls):
"""
        Returns an empty set. An empty set is unbounded and only contains the
empty set.
>>> intrange.empty() in intrange.empty()
True
It is unbounded but the boundaries are not infinite. Its boundaries are
returned as ``None``. Every set contains the empty set.
"""
self = cls.__new__(cls)
self._range = _empty_internal_range
return self | 0.004464 |
def remover(self, id_tipo_acesso):
"""Removes access type by its identifier.
:param id_tipo_acesso: Access type identifier.
:return: None
:raise TipoAcessoError: Access type associated with equipment, cannot be removed.
:raise InvalidParameterError: Protocol value is invalid or none.
:raise TipoAcessoNaoExisteError: Access type doesn't exist.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_tipo_acesso):
raise InvalidParameterError(
u'Access type id is invalid or was not informed.')
url = 'tipoacesso/' + str(id_tipo_acesso) + '/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml) | 0.003476 |
def add_to_group(self, group_path):
"""Add a device to a group, if the group doesn't exist it is created
:param group_path: Path or "name" of the group
"""
if self.get_group_path() != group_path:
post_data = ADD_GROUP_TEMPLATE.format(connectware_id=self.get_connectware_id(),
group_path=group_path)
self._conn.put('/ws/DeviceCore', post_data)
# Invalidate cache
self._device_json = None | 0.00578 |
def next(self):
"""Move to the next token in the token stream."""
self.current_token = next(self.token_stream, None)
if self.current_token is None:
self.token_span = self.token_span[1], self.token_span[1]
raise self.error('Unexpected end of input')
self.token_span = self.current_token.span
return self | 0.005464 |
def CODECOPY(self, mem_offset, code_offset, size):
"""Copy code running in current environment to memory"""
self._allocate(mem_offset, size)
GCOPY = 3 # cost to copy one 32 byte word
copyfee = self.safe_mul(GCOPY, Operators.UDIV(self.safe_add(size, 31), 32))
self._consume(copyfee)
if issymbolic(size):
max_size = solver.max(self.constraints, size)
else:
max_size = size
for i in range(max_size):
if issymbolic(i < size):
default = Operators.ITEBV(8, i < size, 0, self._load(mem_offset + i, 1)) # Fixme. unnecessary memory read
else:
if i < size:
default = 0
else:
default = self._load(mem_offset + i, 1)
if issymbolic(code_offset):
value = Operators.ITEBV(8, code_offset + i >= len(self.bytecode), default, self.bytecode[code_offset + i])
else:
if code_offset + i >= len(self.bytecode):
value = default
else:
value = self.bytecode[code_offset + i]
self._store(mem_offset + i, value)
self._publish('did_evm_read_code', code_offset, size) | 0.003888 |
def setUserKeyCredentials(self, username, public_key=None, private_key=None):
"""Set these properties in ``disk.0.os.credentials``."""
self.setCredentialValues(username=username, public_key=public_key, private_key=private_key) | 0.012346 |
def get_field_class(qs, field_name):
"""
Given a queryset and a field name, it will return the field's class
"""
try:
return qs.model._meta.get_field(field_name).__class__.__name__
    # while annotating, it's possible that the field does not exist.
except FieldDoesNotExist:
return None | 0.003125 |