text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def delete_forwarding_address(payment_id, coin_symbol='btc', api_key=None):
'''
Delete a forwarding address on a specific blockchain, using its
payment id
'''
assert payment_id, 'payment_id required'
assert is_valid_coin_symbol(coin_symbol)
assert api_key, 'api_key required'
params = {'token': api_key}
url = make_url(**dict(payments=payment_id))
r = requests.delete(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r, allow_204=True) | 0.003868 |
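A usage sketch for the call above; the payment id and token are placeholders, not real values, and the function is assumed to be importable as shown.
# Illustrative only -- placeholder id and token.
resp = delete_forwarding_address(
    payment_id='example-payment-id',   # id returned when the forwarding address was created
    coin_symbol='btc',
    api_key='YOUR_API_TOKEN',
)
print(resp)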
def create_dataset(self, dataset, exists_ok=False, retry=DEFAULT_RETRY):
"""API call: create the dataset via a POST request.
See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
try:
api_response = self._call_api(retry, method="POST", path=path, data=data)
return Dataset.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_dataset(dataset.reference, retry=retry) | 0.001433 |
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
"""
Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted to the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted to the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID.
"""
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
"""
The HTTP response has a 400 status code in the following cases:
You do not pass in a session ID or you pass in an invalid session ID.
No clients are actively connected to the OpenTok session.
You specify an invalid resolution value.
            The outputMode property is set to "individual" and you set the resolution property (which is not supported in individual stream archives).
"""
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code) | 0.006907 |
def from_response(response):
"""Returns the correct error type from a ::class::`Response` object."""
if response.code:
return ERRORS[response.code](response)
else:
return Error(response) | 0.008547 |
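A minimal sketch of the kind of code-to-exception registry the dispatch above relies on; the class names here are illustrative assumptions, not the library's actual hierarchy.
# Illustrative registry only -- the real ERRORS mapping and error classes live elsewhere.
class Error(Exception):
    def __init__(self, response):
        super().__init__(getattr(response, 'message', ''))
        self.response = response

class NotFoundError(Error):
    pass

ERRORS = {404: NotFoundError}  # keyed by response.code; from_response falls back to Error when code is falsy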
def get_problem_name(base_name, was_reversed=False, was_copy=False):
"""Construct a problem name from base and reversed/copy options.
Inverse of `parse_problem_name`.
Args:
base_name: base problem name. Should not end in "_rev" or "_copy"
was_reversed: if the problem is to be reversed
was_copy: if the problem is to be copied
Returns:
string name consistent with use with `parse_problem_name`.
Raises:
ValueError if `base_name` ends with "_rev" or "_copy"
"""
if any(base_name.endswith(suffix) for suffix in ("_rev", "_copy")):
raise ValueError("`base_name` cannot end in '_rev' or '_copy'")
name = base_name
if was_copy:
name = "%s_copy" % name
if was_reversed:
name = "%s_rev" % name
return name | 0.009259 |
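A short round trip illustrating the suffix rules above (parse_problem_name is the inverse the docstring refers to).
# The copy suffix is appended before the reverse suffix, so both flags yield "<base>_copy_rev".
print(get_problem_name("translate_ende_wmt32k", was_reversed=True))                # translate_ende_wmt32k_rev
print(get_problem_name("algorithmic_identity", was_copy=True, was_reversed=True))  # algorithmic_identity_copy_rev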
def fetch_data(self):
"""Fetch the latest data from Fido."""
# Get http session
yield from self._get_httpsession()
# Post login page
yield from self._post_login_page()
# Get token
token_uuid = yield from self._get_token()
# Get account number
account_number = yield from self._get_account_number(*token_uuid)
# List phone numbers
self._phone_numbers = yield from self._list_phone_numbers(account_number)
# Get balance
balance = yield from self._get_balance(account_number)
self._data['balance'] = balance
# Get fido dollar
for number in self._phone_numbers:
fido_dollar = yield from self._get_fido_dollar(account_number,
number)
            self._data[number] = {'fido_dollar': fido_dollar}
# Get usage
for number in self._phone_numbers:
usage = yield from self._get_usage(account_number, number)
self._data[number].update(usage) | 0.003749 |
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet,
private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet,
has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet,
has_projects=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, license_template=github.GithubObject.NotSet,
gitignore_template=github.GithubObject.NotSet, allow_squash_merge=github.GithubObject.NotSet,
allow_merge_commit=github.GithubObject.NotSet, allow_rebase_merge=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert has_projects is github.GithubObject.NotSet or isinstance(has_projects, bool), has_projects
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert license_template is github.GithubObject.NotSet or isinstance(license_template, (str, unicode)), license_template
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
assert allow_squash_merge is github.GithubObject.NotSet or isinstance(allow_squash_merge, bool), allow_squash_merge
assert allow_merge_commit is github.GithubObject.NotSet or isinstance(allow_merge_commit, bool), allow_merge_commit
assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(allow_rebase_merge, bool), allow_rebase_merge
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if has_projects is not github.GithubObject.NotSet:
post_parameters["has_projects"] = has_projects
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if license_template is not github.GithubObject.NotSet:
post_parameters["license_template"] = license_template
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
if allow_squash_merge is not github.GithubObject.NotSet:
post_parameters["allow_squash_merge"] = allow_squash_merge
if allow_merge_commit is not github.GithubObject.NotSet:
post_parameters["allow_merge_commit"] = allow_merge_commit
if allow_rebase_merge is not github.GithubObject.NotSet:
post_parameters["allow_rebase_merge"] = allow_rebase_merge
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True) | 0.005762 |
def output(output_id, name, value_class=NumberValue):
"""Add output to controller"""
def _init():
return value_class(
name,
input_id=output_id,
is_input=False,
index=-1
)
def _decorator(cls):
setattr(cls, output_id, _init())
return cls
return _decorator | 0.007557 |
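A hedged sketch of applying the decorator above; the Thermostat class is a made-up example, and the constructor arguments follow the call inside _init() rather than any separate documentation.
# Illustrative: attaches a NumberValue configured as an output to the class.
@output('target_temp', 'Target temperature')
class Thermostat(object):
    pass

print(isinstance(Thermostat.target_temp, NumberValue))  # -> True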
def _check_if_ins_is_dup(self, start, insertion):
"""Helper to identify an insertion as a duplicate
:param start: 1-based insertion start
:type start: int
:param insertion: sequence
:type insertion: str
        :return: (is duplicate, variant start)
        :rtype: (bool, int)
"""
is_dup = False # assume no
variant_start = None
dup_candidate_start = start - len(insertion) - 1
dup_candidate = self._ref_seq[dup_candidate_start:dup_candidate_start + len(insertion)]
if insertion == dup_candidate:
is_dup = True
variant_start = dup_candidate_start + 1
return is_dup, variant_start | 0.004261 |
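A worked example of the duplicate check, using a minimal stand-in for the owning object (an assumption; in the real code this is a method of a normalizer-like class holding the reference sequence).
# Illustrative harness: the method only needs self._ref_seq.
class _DemoNormalizer(object):
    def __init__(self, ref_seq):
        self._ref_seq = ref_seq
    check = _check_if_ins_is_dup  # reuse the function defined above

demo = _DemoNormalizer("GGGCATCAT")
print(demo.check(10, "CAT"))  # -> (True, 7): inserting "CAT" at 1-based position 10 repeats the preceding "CAT"
print(demo.check(10, "TTT"))  # -> (False, None)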
async def vafter(self):
"""Function that is called after a song finishes playing"""
self.logger.debug("Finished playing a song")
if self.state != 'ready':
self.logger.debug("Returning because player is in state {}".format(self.state))
return
self.pause_time = None
if self.vclient_task:
loop = asyncio.get_event_loop()
loop.call_soon(self.vclient_task.cancel)
self.vclient_task = None
try:
if self.streamer is None:
await self.stop()
return
if self.streamer.error is None:
await self.vplay()
else:
self.statuslog.error(self.streamer.error)
await self.destroy()
except Exception as e:
logger.exception(e)
try:
await self.destroy()
except Exception as e:
logger.exception(e) | 0.003074 |
def selected_urls(self):
"""
        Gets the list of selected item file paths (urls)
"""
urls = []
        debug('getting urls')
for proxy_index in self.tree_view.selectedIndexes():
finfo = self.tree_view.fileInfo(proxy_index)
urls.append(finfo.canonicalFilePath())
debug('selected urls %r' % [str(url) for url in urls])
return urls | 0.004938 |
def not_alived(subset=None, show_ip=False, show_ipv4=None):
'''
.. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
salt-run manage.not_alived
'''
show_ip = _show_ip_migration(show_ip, show_ipv4)
return list_not_state(subset=subset, show_ip=show_ip) | 0.001335 |
def range(self, dim, data_range=True, dimension_range=True):
"""Return the lower and upper bounds of values along dimension.
        Args:
            dim: The dimension to compute the range on.
            data_range (bool): Whether to compute the range from the data values.
            dimension_range (bool): Whether to include the Dimension range and
                soft_range in the range calculation.
Returns:
Tuple containing the lower and upper bound
"""
dim = self.get_dimension(dim)
if dim is None or (not data_range and not dimension_range):
return (None, None)
elif all(util.isfinite(v) for v in dim.range) and dimension_range:
return dim.range
elif dim in self.dimensions() and data_range and bool(self):
lower, upper = self.interface.range(self, dim)
else:
lower, upper = (np.NaN, np.NaN)
if not dimension_range:
return lower, upper
return util.dimension_range(lower, upper, dim.range, dim.soft_range) | 0.001835 |
def cache_etag(request, *argz, **kwz):
'''Produce etag value for a cached page.
Intended for usage in conditional views (@condition decorator).'''
response, site, cachekey = kwz.get('_view_data') or initview(request)
if not response: return None
return fjcache.str2md5(
'{0}--{1}--{2}'.format( site.id if site else 'x', cachekey,
response[1].strftime('%Y-%m-%d %H:%M:%S%z') ) ) | 0.033505 |
def validate(self):
"""
Ensure `self.path` has one of the extensions in `self.allowed_formats`.
"""
assert self.path, "{} must have a path".format(self.__class__.__name__)
ext = extract_path_ext(self.path, default_ext=self.default_ext)
if ext not in self.allowed_formats and ext not in CONVERTIBLE_FORMATS[format_presets.VIDEO_HIGH_RES]:
raise ValueError('Incompatible extension {} for VideoFile at {}'.format(ext, self.path)) | 0.00823 |
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.") | 0.008602 |
def _check_prompts(pre_prompt, post_prompt):
'''Check that the prompts are strings'''
if not isinstance(pre_prompt, str):
raise TypeError("The pre_prompt was not a string!")
if post_prompt is not _NO_ARG and not isinstance(post_prompt, str):
raise TypeError("The post_prompt was given and was not a string!") | 0.002976 |
def write_input(self, output_dir,
make_dir_if_not_present=True, include_cif=False):
"""
Writes a set of VASP input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
"""
vinput = self.get_vasp_input()
vinput.write_input(
output_dir, make_dir_if_not_present=make_dir_if_not_present)
if include_cif:
s = vinput["POSCAR"].structure
fname = Path(output_dir) / ("%s.cif" % re.sub(r'\s', "", s.formula))
s.to(filename=fname) | 0.00452 |
def build_is_last_day_of_season(num_steps_per_season):
"""Build utility method to compute whether the season is changing."""
num_steps_per_cycle = np.sum(num_steps_per_season)
changepoints = np.cumsum(np.ravel(num_steps_per_season)) - 1
def is_last_day_of_season(t):
t_ = dist_util.maybe_get_static_value(t)
if t_ is not None: # static case
step_in_cycle = t_ % num_steps_per_cycle
return any(step_in_cycle == changepoints)
else:
step_in_cycle = tf.math.floormod(t, num_steps_per_cycle)
return tf.reduce_any(
input_tensor=tf.equal(step_in_cycle, changepoints))
return is_last_day_of_season | 0.017028 |
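A small worked example of the changepoint arithmetic above, assuming plain Python integers take the static branch (i.e. dist_util.maybe_get_static_value returns them unchanged).
# Seasons of 3 and 2 steps give a cycle of length 5 with boundaries at steps 2 and 4.
is_last = build_is_last_day_of_season([3, 2])
print([bool(is_last(t)) for t in range(6)])  # -> [False, False, True, False, True, False]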
def _add_meta_info(self, eopatch, request_params, service_type):
""" Adds any missing metadata info to EOPatch """
for param, eoparam in zip(['time', 'time_difference', 'maxcc'], ['time_interval', 'time_difference', 'maxcc']):
if eoparam not in eopatch.meta_info:
eopatch.meta_info[eoparam] = request_params[param]
if 'service_type' not in eopatch.meta_info:
eopatch.meta_info['service_type'] = service_type.value
for param in ['size_x', 'size_y']:
if param not in eopatch.meta_info:
eopatch.meta_info[param] = getattr(self, param)
if eopatch.bbox is None:
eopatch.bbox = request_params['bbox'] | 0.004178 |
def _wait_for_result(self):
"""Wait for the sensor to be ready for measurement."""
basetime = 0.018 if self._low_res else 0.128
sleep(basetime * (self._mtreg / 69.0) + self._delay) | 0.009804 |
def design(npos):
"""
make a design matrix for an anisotropy experiment
"""
if npos == 15:
#
# rotatable design of Jelinek for kappabridge (see Tauxe, 1998)
#
A = np.array([[.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0], [1, .0, 0, 0, 0, 0], [.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0], [0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0], [0, 1., 0, 0, 0, 0],
[0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.], [0, 0, 1., 0, 0, 0], [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.]]) # design matrix for 15 measurment positions
elif npos == 6:
A = np.array([[1., 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0], [0, 0, 1., 0, 0, 0], [.5, .5, 0, 1., 0, 0], [
0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, 1.]]) # design matrix for 6 measurment positions
else:
print("measurement protocol not supported yet ")
return
B = np.dot(np.transpose(A), A)
B = linalg.inv(B)
B = np.dot(B, np.transpose(A))
return A, B | 0.00469 |
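A brief usage sketch: the returned pair is the design matrix A and the least-squares operator B = inv(A^T A) A^T that maps the measured susceptibilities onto the six tensor elements.
# Shapes only; real use multiplies B by the vector of measurements.
A, B = design(15)
print(A.shape, B.shape)  # -> (15, 6) (6, 15)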
def start_raylet_monitor(self):
"""Start the raylet monitor."""
stdout_file, stderr_file = self.new_log_files("raylet_monitor")
process_info = ray.services.start_raylet_monitor(
self._redis_address,
stdout_file=stdout_file,
stderr_file=stderr_file,
redis_password=self._ray_params.redis_password,
config=self._config)
assert (ray_constants.PROCESS_TYPE_RAYLET_MONITOR not in
self.all_processes)
self.all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR] = [
process_info
] | 0.003273 |
def read_tuple_ticks(self, symbol, start, end):
''' read ticks as tuple '''
if end is None:
end=sys.maxint
session=self.getReadSession()()
try:
rows=session.query(Tick).filter(and_(Tick.symbol == symbol,
Tick.time >= int(start),
Tick.time < int(end)))
finally:
self.getReadSession().remove()
return [self.__sqlToTupleTick(row) for row in rows] | 0.012727 |
def _exec_calcs(calcs, parallelize=False, client=None, **compute_kwargs):
"""Execute the given calculations.
Parameters
----------
calcs : Sequence of ``aospy.Calc`` objects
parallelize : bool, default False
Whether to submit the calculations in parallel or not
client : distributed.Client or None
The distributed Client used if parallelize is set to True; if None
a distributed LocalCluster is used.
compute_kwargs : dict of keyword arguments passed to ``Calc.compute``
Returns
-------
A list of the values returned by each Calc object that was executed.
"""
if parallelize:
def func(calc):
"""Wrap _compute_or_skip_on_error to require only the calc
argument"""
if 'write_to_tar' in compute_kwargs:
compute_kwargs['write_to_tar'] = False
return _compute_or_skip_on_error(calc, compute_kwargs)
if client is None:
n_workers = _n_workers_for_local_cluster(calcs)
with distributed.LocalCluster(n_workers=n_workers) as cluster:
with distributed.Client(cluster) as client:
result = _submit_calcs_on_client(calcs, client, func)
else:
result = _submit_calcs_on_client(calcs, client, func)
if compute_kwargs['write_to_tar']:
_serial_write_to_tar(calcs)
return result
else:
return [_compute_or_skip_on_error(calc, compute_kwargs)
for calc in calcs] | 0.000654 |
def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
        :param xml_declaration: if ``True`` (default) prints a leading XML
            declaration line
        :type xml_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node,
STATION_HISTORY_XMLNS_PREFIX,
STATION_HISTORY_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration) | 0.002155 |
def predict_netmhcii_binding(job, peptfile, allele, univ_options, netmhciipan_options):
"""
Predict binding for each peptide in `peptfile` to `allele` using netMHCIIpan.
:param toil.fileStore.FileID peptfile: The input peptide fasta
:param str allele: Allele to predict binding against
:param dict univ_options: Dict of universal options used by almost all tools
:param dict netmhciipan_options: Options specific to netmhciipan binding prediction
:return: tuple of fsID for file containing the predictions and the predictor used (netMHCIIpan)
:rtype: tuple(toil.fileStore.FileID, str)
"""
work_dir = os.getcwd()
input_files = {
'peptfile.faa': peptfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
peptides = read_peptide_file(os.path.join(os.getcwd(), 'peptfile.faa'))
if not peptides:
return job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile()), None
# netMHCIIpan accepts differently formatted alleles so we need to modify the input alleles
if allele.startswith('HLA-DQA') or allele.startswith('HLA-DPA'):
allele = re.sub(r'[*:]', '', allele)
allele = re.sub(r'/', '-', allele)
elif allele.startswith('HLA-DRB'):
allele = re.sub(r':', '', allele)
allele = re.sub(r'\*', '_', allele)
allele = allele.lstrip('HLA-')
else:
raise RuntimeError('Unknown allele seen')
parameters = ['-a', allele,
'-xls', '1',
'-xlsfile', 'predictions.tsv',
'-f', input_files['peptfile.faa']]
# netMHC writes a lot of useless stuff to sys.stdout so we open /dev/null and dump output there.
with open(os.devnull, 'w') as output_catcher:
docker_call(tool='netmhciipan', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=output_catcher,
tool_version=netmhciipan_options['version'])
output_file = job.fileStore.writeGlobalFile('/'.join([work_dir, 'predictions.tsv']))
job.fileStore.logToMaster('Ran netmhciipan on %s successfully' % allele)
return output_file, 'netMHCIIpan' | 0.005906 |
def equation(self):
"""Mix-in class that returns matrix rows for potential-specified conditions.
Returns matrix part (nunknowns,neq)
Returns rhs part nunknowns
"""
mat = np.empty((self.nunknowns, self.model.neq))
# rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero
rhs = self.pc.copy()
for icp in range(self.ncp):
istart = icp * self.nlayers
# rhs[istart:istart+self.nlayers] = self.pc[]
ieq = 0
for e in self.model.elementlist:
if e.nunknowns > 0:
mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \
e.potinflayers(self.xc[icp], self.yc[icp], self.layers)
if e == self:
mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]
ieq += e.nunknowns
else:
rhs[istart:istart + self.nlayers] -= \
e.potentiallayers(self.xc[icp], self.yc[icp],
self.layers) # Pretty cool that this works, really
return mat, rhs | 0.005017 |
def create_transfer_job(self, body):
"""
Creates a transfer job that runs periodically.
:param body: (Required) A request body, as described in
            https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/create#request-body
:type body: dict
:return: transfer job.
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
:rtype: dict
"""
body = self._inject_project_id(body, BODY, PROJECT_ID)
return self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries) | 0.004615 |
def projection_as_vec_v3(v, w):
"""Return the signed length of the projection of vector v on vector w.
Returns the full vector result of projection_v3().
"""
proj_len = projection_v3(v, w)
return scale_v3(v, proj_len) | 0.004202 |
def eventFilter(self, object, event):
"""
Filters events for the popup tree widget.
:param object | <QObject>
event | <QEvent>
        :return     <bool> | consumed
"""
try:
is_lineedit = object == self.lineEdit()
except RuntimeError:
is_lineedit = False
try:
is_nav = object == self._navigator
except RuntimeError:
is_nav = False
if not (is_nav or is_lineedit):
return super(XOrbColumnNavigatorBox, self).eventFilter(object, event)
if event.type() == event.KeyPress:
# accept lookup
if event.key() in (Qt.Key_Enter, Qt.Key_Return, Qt.Key_Tab):
self.acceptColumn()
event.accept()
return True
# cancel lookup
elif event.key() == Qt.Key_Escape:
self.hidePopup()
event.accept()
return True
# update the search info
else:
self.lineEdit().keyPressEvent(event)
event.accept()
return True
elif is_nav and event.type() == event.Show:
object.resizeToContents()
object.horizontalScrollBar().setValue(0)
elif event.type() == event.KeyRelease:
self.lineEdit().keyReleaseEvent(event)
self.navigator().blockSignals(True)
self.navigator().setCurrentSchemaPath(self.lineEdit().text())
self.navigator().blockSignals(False)
event.accept()
return True
elif event.type() == event.MouseButtonPress:
local_pos = object.mapFromGlobal(event.globalPos())
in_widget = object.rect().contains(local_pos)
if not in_widget:
self.hidePopup()
event.accept()
return True
return super(XOrbColumnNavigatorBox, self).eventFilter(object, event) | 0.006834 |
def union_elements(elements):
"""elements = [(chr, s, e, id), ...], this is to join elements that have a
deletion in the 'to' species
"""
if len(elements) < 2: return elements
assert set( [e[3] for e in elements] ) == set( [elements[0][3]] ), "more than one id"
el_id = elements[0][3]
unioned_elements = []
for ch, chgrp in groupby(elements, key=itemgetter(0)):
for (s, e) in elem_u( np.array([itemgetter(1, 2)(_) for _ in chgrp], dtype=np.uint) ):
if (s < e):
unioned_elements.append( (ch, s, e, el_id) )
assert len(unioned_elements) <= len(elements)
return unioned_elements | 0.018377 |
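A hedged example of the merge, assuming elem_u (used above) unions overlapping [start, end) intervals within each chromosome group.
elements = [("chr1", 10, 20, "id1"), ("chr1", 15, 25, "id1"), ("chr2", 5, 8, "id1")]
print(union_elements(elements))
# expected -> [('chr1', 10, 25, 'id1'), ('chr2', 5, 8, 'id1')]  (coordinates may come back as numpy integers)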
def _read_pdb(path):
"""Read PDB file from local drive."""
r_mode = 'r'
openf = open
if path.endswith('.gz'):
r_mode = 'rb'
openf = gzip.open
with openf(path, r_mode) as f:
txt = f.read()
if path.endswith('.gz'):
if sys.version_info[0] >= 3:
txt = txt.decode('utf-8')
else:
txt = txt.encode('ascii')
return path, txt | 0.00431 |
def get_kwdefaults(func, parse_source=False):
r"""
Args:
func (func):
Returns:
dict:
CommandLine:
python -m utool.util_inspect get_kwdefaults
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> func = dummy_func
>>> parse_source = True
>>> kwdefaults = get_kwdefaults(func, parse_source)
>>> print('kwdefaults = %s' % (ut.repr4(kwdefaults),))
"""
#import utool as ut
#with ut.embed_on_exception_context:
argspec = inspect.getargspec(func)
kwdefaults = {}
if argspec.args is None or argspec.defaults is None:
pass
else:
args = argspec.args
defaults = argspec.defaults
#kwdefaults = OrderedDict(zip(argspec.args[::-1], argspec.defaults[::-1]))
kwpos = len(args) - len(defaults)
kwdefaults = OrderedDict(zip(args[kwpos:], defaults))
if parse_source and argspec.keywords:
# TODO parse for kwargs.get/pop
keyword_defaults = parse_func_kwarg_keys(func, with_vals=True)
for key, val in keyword_defaults:
assert key not in kwdefaults, 'parsing error'
kwdefaults[key] = val
return kwdefaults | 0.003946 |
def clear(self):
"""
Removes all data from the buffer.
"""
self.io.seek(0)
self.io.truncate()
for item in self.monitors:
item[2] = 0 | 0.010417 |
def parse_result(self, data):
"""
Parse result of L{pyhsm.defines.YSM_BUFFER_LOAD} command.
@return: Number of bytes now in the YubiHSM internal buffer.
@rtype: integer
@raise pyhsm.exception.YHSM_Error: Unexpected number of bytes were loaded
"""
# typedef struct {
# uint8_t numBytes; // Number of bytes in buffer now
# } YSM_BUFFER_LOAD_RESP;
count = ord(data[0])
if self.offset == 0:
# if offset was 0, the buffer was reset and
# we can verify the length returned
if count != self.data_len:
                raise pyhsm.exception.YHSM_Error("Incorrect number of bytes in buffer (got %i, expected %i)" \
                                                 % (count, self.data_len))
return count | 0.008294 |
def show_summary(self, **kwargs):
"""
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True
"""
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n") | 0.006227 |
def clone(self, population):
"""
Copy the holder just enough to be able to run a new simulation without modifying the original simulation.
"""
new = empty_clone(self)
new_dict = new.__dict__
for key, value in self.__dict__.items():
if key not in ('population', 'formula', 'simulation'):
new_dict[key] = value
new_dict['population'] = population
new_dict['simulation'] = population.simulation
return new | 0.005894 |
def push(self, bot, channel_type, ar, user_id):
"""
        Use this method to push a message to a user of the bot.
The message should be packed into ActionResponse object.
This allows to push text messages, buttons, images.
This also allows to force current state of user.
:param bot: bot that will push user
:type bot: Bot
:param channel_type: one of [telegram, facebook, slack]
:type channel_type: str
:param ar: message packed in response object
:type ar: ActionResponse
:param user_id: user id in used channel
:type user_id: str
"""
self.client.push.__getattr__(bot.name).__call__(_method="POST",
_params=dict(id=user_id, channel=channel_type),
_json=ar.to_json()) | 0.003386 |
def Flush(self):
"""Flush the data to the index."""
super(LabelSet, self).Flush()
self.to_delete = self.to_delete.difference(self.to_set)
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.LabelUpdateLabels(
self.urn, self.to_set, to_delete=self.to_delete)
self.to_set = set()
self.to_delete = set() | 0.005571 |
def unlink_user_account(self, id, provider, user_id):
"""Unlink a user account
Args:
id (str): The user_id of the user identity.
provider (str): The type of identity provider (e.g: facebook).
user_id (str): The unique identifier for the user for the identity.
See: https://auth0.com/docs/api/management/v2#!/Users/delete_user_identity_by_user_id
"""
url = self._url('{}/identities/{}/{}'.format(id, provider, user_id))
return self.client.delete(url) | 0.005587 |
def _set_mode(self, mode='r', bufsize=-1):
"""
Subclasses call this method to initialize the BufferedFile.
"""
# set bufsize in any event, because it's used for readline().
self._bufsize = self._DEFAULT_BUFSIZE
if bufsize < 0:
# do no buffering by default, because otherwise writes will get
# buffered in a way that will probably confuse people.
bufsize = 0
if bufsize == 1:
# apparently, line buffering only affects writes. reads are only
# buffered if you call readline (directly or indirectly: iterating
# over a file will indirectly call readline).
self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
elif bufsize > 1:
self._bufsize = bufsize
self._flags |= self.FLAG_BUFFERED
self._flags &= ~self.FLAG_LINE_BUFFERED
elif bufsize == 0:
# unbuffered
self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
if ('r' in mode) or ('+' in mode):
self._flags |= self.FLAG_READ
if ('w' in mode) or ('+' in mode):
self._flags |= self.FLAG_WRITE
if ('a' in mode):
self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
self._size = self._get_size()
self._pos = self._realpos = self._size
if ('b' in mode):
self._flags |= self.FLAG_BINARY
if ('U' in mode):
self._flags |= self.FLAG_UNIVERSAL_NEWLINE
# built-in file objects have this attribute to store which kinds of
# line terminations they've seen:
# <http://www.python.org/doc/current/lib/built-in-funcs.html>
self.newlines = None | 0.001125 |
def get_path_to_repo_and_branch(self, repo: str, branch: str) -> Path:
""" Returns a :class:`Path <pathlib.Path>` to where this specific branch is stored on disk.
:param repo: Repo URL
:param branch: branch
:return: Path to where the specific branch from this repo is being cloned.
"""
return self.get_path_to_repo(repo).resolve() / branch | 0.010309 |
def zSetDefaultMeritFunctionSEQ(self, ofType=0, ofData=0, ofRef=0, pupilInteg=0, rings=0,
arms=0, obscuration=0, grid=0, delVignetted=False, useGlass=False,
glassMin=0, glassMax=1000, glassEdge=0, useAir=False, airMin=0,
airMax=1000, airEdge=0, axialSymm=True, ignoreLatCol=False,
addFavOper=False, startAt=1, relativeXWgt=1.0, overallWgt=1.0,
configNum=0):
"""Sets the default merit function for Sequential Merit Function Editor
Parameters
----------
ofType : integer
optimization function type (0=RMS, ...)
ofData : integer
optimization function data (0=Wavefront, 1=Spot Radius, ...)
ofRef : integer
optimization function reference (0=Centroid, ...)
pupilInteg : integer
pupil integration method (0=Gaussian Quadrature, 1=Rectangular Array)
rings : integer
rings (0=1, 1=2, 2=3, 3=4, ...)
arms : integer
arms (0=6, 1=8, 2=10, 3=12)
        obscuration : real
            obscuration
        grid : integer
            grid (used when pupil integration is Rectangular Array)
        delVignetted : boolean
            delete vignetted ?
useGlass : boolean
whether to use Glass settings for thickness boundary
glassMin : real
            glass minimum thickness
glassMax : real
glass maximum thickness
glassEdge : real
glass edge thickness
useAir : boolean
whether to use Air settings for thickness boundary
airMin : real
air minimum thickness
airMax : real
air maximum thickness
airEdge : real
air edge thickness
axialSymm : boolean
assume axial symmetry
        ignoreLatCol : boolean
            ignore lateral color
        addFavOper : boolean
            add favorite operands
configNum : integer
configuration number (0=All)
startAt : integer
start at
relativeXWgt : real
relative X weight
overallWgt : real
overall weight
"""
mfe = self.pMFE
wizard = mfe.pSEQOptimizationWizard
wizard.pType = ofType
wizard.pData = ofData
wizard.pReference = ofRef
wizard.pPupilIntegrationMethod = pupilInteg
wizard.pRing = rings
wizard.pArm = arms
wizard.pObscuration = obscuration
wizard.pGrid = grid
wizard.pIsDeleteVignetteUsed = delVignetted
wizard.pIsGlassUsed = useGlass
wizard.pGlassMin = glassMin
wizard.pGlassMax = glassMax
wizard.pGlassEdge = glassEdge
wizard.pIsAirUsed = useAir
wizard.pAirMin = airMin
wizard.pAirMax = airMax
wizard.pAirEdge = airEdge
wizard.pIsAssumeAxialSymmetryUsed = axialSymm
wizard.pIsIgnoreLateralColorUsed = ignoreLatCol
wizard.pConfiguration = configNum
wizard.pIsAddFavoriteOperandsUsed = addFavOper
wizard.pStartAt = startAt
wizard.pRelativeXWeight = relativeXWgt
wizard.pOverallWeight = overallWgt
wizard.CommonSettings.OK() | 0.010949 |
async def close(self):
"""|coro|
Closes the connection to discord.
"""
if self._closed:
return
await self.http.close()
self._closed = True
for voice in self.voice_clients:
try:
await voice.disconnect()
except Exception:
# if an error happens during disconnects, disregard it.
pass
if self.ws is not None and self.ws.open:
await self.ws.close()
self._ready.clear() | 0.003731 |
def elbv2_load_balancer_hosted_zone(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the V2 elb to look up
default: value to return in case of no match
Returns:
The hosted zone ID of the ELB found with a name matching 'lookup'.
"""
try:
elb = self._elbv2_load_balancer(lookup)
return elb['CanonicalHostedZoneId']
except ClientError:
return default | 0.00939 |
def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None,
tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program':
'''
Create a :py:class:`Program` object.
Only linked programs will be returned.
        All shader stages except the vertex shader are optional.
The varyings are only used when a transform program is created.
        Args:
            vertex_shader (str): The vertex shader source.
            fragment_shader (str): The fragment shader source.
            geometry_shader (str): The geometry shader source.
            tess_control_shader (str): The tessellation control shader source.
            tess_evaluation_shader (str): The tessellation evaluation shader source.
            varyings (list): A list of varying names.
Returns:
:py:class:`Program` object
'''
if type(varyings) is str:
varyings = (varyings,)
varyings = tuple(varyings)
res = Program.__new__(Program)
res.mglo, ls1, ls2, ls3, ls4, ls5, res._subroutines, res._geom, res._glo = self.mglo.program(
vertex_shader, fragment_shader, geometry_shader, tess_control_shader, tess_evaluation_shader,
varyings
)
members = {}
for item in ls1:
obj = Attribute.__new__(Attribute)
obj.mglo, obj._location, obj._array_length, obj._dimension, obj._shape, obj._name = item
members[obj.name] = obj
for item in ls2:
obj = Varying.__new__(Varying)
obj._number, obj._array_length, obj._dimension, obj._name = item
members[obj.name] = obj
for item in ls3:
obj = Uniform.__new__(Uniform)
obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item
members[obj.name] = obj
for item in ls4:
obj = UniformBlock.__new__(UniformBlock)
obj.mglo, obj._index, obj._size, obj._name = item
members[obj.name] = obj
for item in ls5:
obj = Subroutine.__new__(Subroutine)
obj._index, obj._name = item
members[obj.name] = obj
res._members = members
res.ctx = self
res.extra = None
return res | 0.003837 |
def processStream(self):
"""Process a brotli stream.
"""
print('addr hex{:{}s}binary context explanation'.format(
'', self.width-10))
print('Stream header'.center(60, '-'))
self.windowSize = self.verboseRead(WindowSizeAlphabet())
print('Metablock header'.center(60, '='))
self.ISLAST = False
self.output = bytearray()
while not self.ISLAST:
self.ISLAST = self.verboseRead(
BoolCode('LAST', description="Last block"))
if self.ISLAST:
if self.verboseRead(
BoolCode('EMPTY', description="Empty block")): break
if self.metablockLength(): continue
if not self.ISLAST and self.uncompressed(): continue
print('Block type descriptors'.center(60, '-'))
self.numberOfBlockTypes = {}
self.currentBlockCounts = {}
self.blockTypeCodes = {}
self.blockCountCodes = {}
for blockType in (L,I,D): self.blockType(blockType)
print('Distance code parameters'.center(60, '-'))
self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet())
self.readLiteralContextModes()
print('Context maps'.center(60, '-'))
self.cmaps = {}
#keep the number of each kind of prefix tree for the last loop
numberOfTrees = {I: self.numberOfBlockTypes[I]}
for blockType in (L,D):
numberOfTrees[blockType] = self.contextMap(blockType)
print('Prefix code lists'.center(60, '-'))
self.prefixCodes = {}
for blockType in (L,I,D):
self.readPrefixArray(blockType, numberOfTrees[blockType])
self.metablock() | 0.007226 |
def validate_one(cls, keystr):
""" validates one key string """
regex = r'%s$' % cls.ALLOWED_KEY
if re.match(regex, keystr) is None:
raise cls.Bad("Bad key syntax for: %s. Should be: key1.key2..." % (keystr))
return True | 0.011321 |
def release_value_set(self):
"""
Release a reserved value set so that other executions can use it also.
"""
if self._remotelib:
self._remotelib.run_keyword('release_value_set', [self._my_id], {})
else:
_PabotLib.release_value_set(self, self._my_id) | 0.00641 |
def filter_match_kwargs(kwargs, children=False):
"""
Filters out kwargs for Match construction
:param kwargs:
:type kwargs: dict
    :param children: Flag to filter children matches
    :type children: bool
:return: A filtered dict
:rtype: dict
"""
kwargs = kwargs.copy()
for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'):
if key in kwargs:
del kwargs[key]
if children:
for key in ('name',):
if key in kwargs:
del kwargs[key]
return kwargs | 0.001799 |
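A quick illustration of which keys survive the filtering above.
kwargs = {'pattern': r'\d+', 'start': 0, 'name': 'year', 'private': True}
print(filter_match_kwargs(kwargs))                  # -> {'name': 'year', 'private': True}
print(filter_match_kwargs(kwargs, children=True))   # -> {'private': True}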
def _parse_csv_header_lcc_csv_v1(headerlines):
'''
This parses the header of the LCC CSV V1 LC format.
'''
# the first three lines indicate the format name, comment char, separator
commentchar = headerlines[1]
separator = headerlines[2]
headerlines = [x.lstrip('%s ' % commentchar) for x in headerlines[3:]]
# next, find the indices of the various LC sections
metadatastart = headerlines.index('OBJECT METADATA')
columnstart = headerlines.index('COLUMN DEFINITIONS')
lcstart = headerlines.index('LIGHTCURVE')
metadata = ' ' .join(headerlines[metadatastart+1:columnstart-1])
columns = ' ' .join(headerlines[columnstart+1:lcstart-1])
metadata = json.loads(metadata)
columns = json.loads(columns)
return metadata, columns, separator | 0.001252 |
def fetch(self, from_time, until_time=None):
"""
        This method fetches data from the database for the given period.
        fetch(path, fromTime, untilTime=None)
        fromTime is a datetime
        untilTime is also a datetime, but defaults to now.
Returns a tuple of (timeInfo, valueList)
where timeInfo is itself a tuple of (fromTime, untilTime, step)
Returns None if no data can be returned
"""
until_time = until_time or datetime.now()
time_info, values = whisper.fetch(self.path,
from_time.strftime('%s'),
until_time.strftime('%s'))
# build up a list of (timestamp, value)
start_time, end_time, step = time_info
current = start_time
times = []
while current <= end_time:
times.append(current)
current += step
return zip(times, values) | 0.002041 |
def create_context(self, state_hash, base_contexts, inputs, outputs):
"""Create a ExecutionContext to run a transaction against.
Args:
state_hash: (str): Merkle root to base state on.
base_contexts (list of str): Context ids of contexts that will
have their state applied to make this context.
inputs (list of str): Addresses that can be read from.
outputs (list of str): Addresses that can be written to.
Returns:
context_id (str): the unique context_id of the session
"""
for address in inputs:
if not self.namespace_is_valid(address):
raise CreateContextException(
"Address or namespace {} listed in inputs is not "
"valid".format(address))
for address in outputs:
if not self.namespace_is_valid(address):
raise CreateContextException(
"Address or namespace {} listed in outputs is not "
"valid".format(address))
addresses_to_find = [add for add in inputs if len(add) == 70]
address_values, reads = self._find_address_values_in_chain(
base_contexts=base_contexts,
addresses_to_find=addresses_to_find)
context = ExecutionContext(
state_hash=state_hash,
read_list=inputs,
write_list=outputs,
base_context_ids=base_contexts)
contexts_asked_not_found = [cid for cid in base_contexts
if cid not in self._contexts]
if contexts_asked_not_found:
raise KeyError(
"Basing a new context off of context ids {} "
"that are not in context manager".format(
contexts_asked_not_found))
context.create_initial(address_values)
self._contexts[context.session_id] = context
if reads:
context.create_prefetch(reads)
self._address_queue.put_nowait(
(context.session_id, state_hash, reads))
return context.session_id | 0.000931 |
def begin_track(tuning, padding=2):
"""Helper function that builds the first few characters of every bar."""
# find longest shorthand tuning base
names = [x.to_shorthand() for x in tuning.tuning]
basesize = len(max(names)) + 3
# Build result
res = []
for x in names:
r = ' %s' % x
spaces = basesize - len(r)
r += ' ' * spaces + '||' + '-' * padding
res.append(r)
return res | 0.002283 |
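A sketch with minimal stand-ins for the tuning objects; these classes are assumptions for illustration only, not the library's real tuning types.
class _Note(object):
    def __init__(self, shorthand):
        self.shorthand = shorthand
    def to_shorthand(self):
        return self.shorthand

class _Tuning(object):
    def __init__(self, names):
        self.tuning = [_Note(n) for n in names]

for line in begin_track(_Tuning(["E-2", "A-2", "D-3", "G-3", "B-3", "E-4"])):
    print(line)  # e.g. " E-2  ||--", with every string name padded to the same width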
def goal_update(self, goal_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/goals#update-goal"
api_path = "/api/v2/goals/{goal_id}"
api_path = api_path.format(goal_id=goal_id)
return self.call(api_path, method="PUT", data=data, **kwargs) | 0.006873 |
def _fail(self, message, text, i):
"""Raise an exception with given message and text at i."""
raise ValueError("{}:\n{}".format(message, text[i : i + 79])) | 0.017442 |
def _update_plot(self, key, element, bars, lims, ranges):
"""
Process the bars and draw the offset line as necessary. If a
color map is set in the style of the 'main' ViewableElement object, color
the bars appropriately, respecting the required normalization
settings.
"""
main = self.adjoined.main
_, y1 = element.range(1)
offset = self.offset * y1
range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)
# Check if plot is colormapped
plot_type = Store.registry['matplotlib'].get(type(range_item))
if isinstance(plot_type, PlotSelector):
plot_type = plot_type.get_plot_class(range_item)
opts = self.lookup_options(range_item, 'plot')
if plot_type and issubclass(plot_type, ColorbarPlot):
cidx = opts.options.get('color_index', None)
if cidx is None:
opts = self.lookup_options(range_item, 'style')
cidx = opts.kwargs.get('color', None)
if cidx not in range_item:
cidx = None
cdim = None if cidx is None else range_item.get_dimension(cidx)
else:
cdim = None
# Get colormapping options
if isinstance(range_item, (HeatMap, Raster)) or cdim:
style = self.lookup_options(range_item, 'style')[self.cyclic_index]
cmap = cm.get_cmap(style.get('cmap'))
main_range = style.get('clims', main_range)
else:
cmap = None
if offset and ('offset_line' not in self.handles):
self.handles['offset_line'] = self.offset_linefn(offset,
linewidth=1.0,
color='k')
elif offset:
self._update_separator(offset)
if cmap is not None:
self._colorize_bars(cmap, bars, element, main_range, dim)
return bars | 0.001979 |
def dependencies(self) -> List[Dependency]:
"""Return the PB dependencies."""
dependencies_str = DB.get_hash_value(self.key, 'dependencies')
dependencies = []
for dependency in ast.literal_eval(dependencies_str):
dependencies.append(Dependency(dependency))
return dependencies | 0.006098 |
def python_data(self):
'''Representation of aggregate value as dictionary.'''
try:
value = self.clean_value
except LookupError:
# XXX is this necessary?
value = self.get_initial()
return self.from_python(value) | 0.007194 |
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd | 0.002757 |
def slot_schedule_difference(old_schedule, new_schedule):
"""Compute the difference between two schedules from a slot perspective
Parameters
----------
old_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
new_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
Returns
-------
list
A list of :py:class:`resources.ChangedSlotScheduledItem` objects
Example
-------
>>> from conference_scheduler.resources import Event, Slot, ScheduledItem
>>> from conference_scheduler.scheduler import slot_schedule_difference
>>> events = [Event(f'event_{i}', 30, 0) for i in range(5)]
>>> slots = [Slot(f'venue_{i}', '', 30, 100, None) for i in range(5)]
>>> old_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[1]),
... ScheduledItem(events[2], slots[2]))
>>> new_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[2]),
... ScheduledItem(events[2], slots[3]),
... ScheduledItem(events[3], slots[4]))
>>> diff = slot_schedule_difference(old_schedule, new_schedule)
>>> print([item.slot.venue for item in diff])
['venue_1', 'venue_2', 'venue_3', 'venue_4']
"""
old = {item.slot: item for item in old_schedule}
new = {item.slot: item for item in new_schedule}
common_slots = set(old.keys()).intersection(new.keys())
added_slots = new.keys() - old.keys()
removed_slots = old.keys() - new.keys()
changed = [
ChangedSlotScheduledItem(
old[slot].slot, old[slot].event, new[slot].event)
for slot in common_slots
if old[slot].event != new[slot].event
]
added = [
ChangedSlotScheduledItem(new[slot].slot, None, new[slot].event)
for slot in added_slots
]
removed = [
ChangedSlotScheduledItem(old[slot].slot, old[slot].event, None)
for slot in removed_slots
]
return sorted(
changed + added + removed,
key=lambda item: (item.slot.venue, item.slot.starts_at)
) | 0.000464 |
def _write(self, data):
"""
_write: binary data -> None
Packages the given binary data in an API frame and writes the
result to the serial port
"""
frame = APIFrame(data, self._escaped).output()
self.serial.write(frame) | 0.007246 |
def _wait_time(self, shard_state, secs, now=datetime.datetime.now):
"""Time to wait until slice_start_time is secs ago from now.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
0 if no wait. A positive int in seconds otherwise. Always around up.
"""
assert shard_state.slice_start_time is not None
delta = now() - shard_state.slice_start_time
duration = datetime.timedelta(seconds=secs)
if delta < duration:
return util.total_seconds(duration - delta)
else:
return 0 | 0.005111 |
def _get_object_key(self, p_object):
"""Get key from object"""
matched_key = None
matched_index = None
if hasattr(p_object, self._searchNames[0]):
return getattr(p_object, self._searchNames[0])
for x in xrange(len(self._searchNames)):
key = self._searchNames[x]
if hasattr(p_object, key):
matched_key = key
matched_index = x
if matched_key is None:
raise KeyError()
if matched_index != 0 and self._searchOptimize:
self._searchNames.insert(0, self._searchNames.pop(matched_index))
return getattr(p_object, matched_key) | 0.00295 |
def update_network_asset(self, asset_id, name, asset_type):
"""
Updates a Network Asset
Args:
            asset_id: The identifier of the network asset to update
            name: The name provided to the network asset
            asset_type: The type provided to the network asset
Returns:
"""
self.update_asset('NETWORK', asset_id, name, asset_type) | 0.005634 |
def parse_dom(dom):
"""Parse dom into a Graph.
:param dom: dom as returned by minidom.parse or minidom.parseString
:return: A Graph representation
"""
root = dom.getElementsByTagName("graphml")[0]
graph = root.getElementsByTagName("graph")[0]
name = graph.getAttribute('id')
g = Graph(name)
# # Get attributes
# attributes = []
# for attr in root.getElementsByTagName("key"):
# attributes.append(attr)
# Get nodes
for node in graph.getElementsByTagName("node"):
n = g.add_node(id=node.getAttribute('id'))
for attr in node.getElementsByTagName("data"):
if attr.firstChild:
n[attr.getAttribute("key")] = attr.firstChild.data
else:
n[attr.getAttribute("key")] = ""
# Get edges
for edge in graph.getElementsByTagName("edge"):
source = edge.getAttribute('source')
dest = edge.getAttribute('target')
# source/target attributes refer to IDs: http://graphml.graphdrawing.org/xmlns/1.1/graphml-structure.xsd
e = g.add_edge_by_id(source, dest)
for attr in edge.getElementsByTagName("data"):
if attr.firstChild:
e[attr.getAttribute("key")] = attr.firstChild.data
else:
e[attr.getAttribute("key")] = ""
return g | 0.002033 |
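A usage sketch: build the DOM with minidom first, then hand it to parse_dom (the Graph class itself is provided by the surrounding module).
from xml.dom import minidom

graphml = ('<graphml><graph id="demo">'
           '<node id="a"/><node id="b"/>'
           '<edge source="a" target="b"/>'
           '</graph></graphml>')
g = parse_dom(minidom.parseString(graphml))  # Graph named "demo" with nodes a, b and one edge a->b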
def _rm_joliet_dir(self, joliet_path):
# type: (bytes) -> int
'''
An internal method to remove a directory from the Joliet portion of the ISO.
Parameters:
joliet_path - The Joliet directory to remove.
Returns:
The number of bytes to remove from the ISO for this Joliet directory.
'''
if self.joliet_vd is None:
raise pycdlibexception.PyCdlibInternalError('Tried to remove joliet dir from non-Joliet ISO')
log_block_size = self.joliet_vd.logical_block_size()
joliet_child = self._find_joliet_record(joliet_path)
num_bytes_to_remove = joliet_child.get_data_length()
num_bytes_to_remove += self._remove_child_from_dr(joliet_child,
joliet_child.index_in_parent,
log_block_size)
if joliet_child.ptr is None:
raise pycdlibexception.PyCdlibInternalError('Joliet directory has no path table record; this should not be')
if self.joliet_vd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(joliet_child.ptr.len_di)):
num_bytes_to_remove += 4 * log_block_size
return num_bytes_to_remove | 0.006255 |
def no_history_check(func):
"""
    Decorator that checks whether history tracking has been turned off;
    if it has, the decorated function raises an IndexError instead of running.
:param func: function to decorate
:return: original results or exception
"""
def no_history_check_decorator(self, *args, **kwargs):
        if ConnectionBasic.max_history == 0:
raise IndexError("ConnectionBasic.max_history is set to 0, "
"therefore this functionality is disabled")
return func(self, *args, **kwargs)
return no_history_check_decorator | 0.001629 |
def log(self, level, prefix = ''):
"""Writes the contents of the Extension to the logging system.
"""
logging.log(level, "%sname: %s", prefix, self.__name)
logging.log(level, "%soptions: %s", prefix, self.__options) | 0.016194 |
def retrieve_data_blob(self, txid):
"""TODO add docstring"""
rawtx = self.retrieve_tx(txid)
return self.get_data_blob(rawtx) | 0.013514 |
def get_config(self, name, default=MISSING):
"""Get a config value from this adapter by name
Args:
name (string): The name of the config variable
default (object): The default value to return if config is not found
Returns:
object: the value associated with the name
Raises:
ArgumentError: if the name is not found and no default is supplied
"""
res = self.config.get(name, default)
if res is MISSING:
raise ArgumentError("Could not find config value by name and no default supplied", name=name)
return res | 0.006299 |
def _read_ftdna(file):
""" Read and parse Family Tree DNA (FTDNA) file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
skiprows=1,
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object},
)
# remove incongruous data
df = df.drop(df.loc[df["chrom"] == "0"].index)
df = df.drop(
df.loc[df.index == "RSID"].index
) # second header for concatenated data
# if second header existed, pos dtype will be object (should be np.int64)
df["pos"] = df["pos"].astype(np.int64)
return sort_snps(df), "FTDNA" | 0.003043 |
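A hedged sketch of the FTDNA layout this parser expects; pandas also accepts a file-like object here, and sort_snps is the module helper used above.
from io import StringIO

raw = ("RSID,CHROMOSOME,POSITION,RESULT\n"   # header row, skipped via skiprows=1
       "rs3094315,1,752566,AA\n"
       "rs12124819,1,776546,--\n")           # '--' is treated as missing via na_values
df, source = _read_ftdna(StringIO(raw))
print(source)                                # -> FTDNA
print(df.loc["rs3094315", "genotype"])       # -> AA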
def _validate_response(self, response):
"""
:param response: requests.models.Response
:raises: pybomb.exceptions.InvalidResponseException
:raises: pybomb.exceptions.BadRequestException
"""
try:
response.raise_for_status()
except HTTPError as http_error:
raise BadRequestException(str(http_error))
response_data = response.json()
if response_data["status_code"] != self.RESPONSE_STATUS_OK:
raise InvalidResponseException(
"Response code {0}: {1}".format(
response_data["status_code"], response_data["error"]
)
) | 0.002928 |
def returner(ret):
'''
Return information to a Kafka server
'''
if __salt__['config.option']('returner.kafka.topic'):
topic = __salt__['config.option']('returner.kafka.topic')
conn = _get_conn()
producer = Producer({'bootstrap.servers': conn})
producer.poll(0)
producer.produce(topic, salt.utils.json.dumps(ret), str(ret).encode('utf-8'), callback=_delivery_report)
producer.flush()
else:
log.error('Unable to find kafka returner config option: topic') | 0.003774 |
def _getphotosets(self):
"""Returns dictionary of photosets retrieved from flickr
d['title']['number_photos'] : Number of photos
d['title']['id'] : ID of photoset
d['title']['photo_id'] : ID of primary photo
d['title']['url'] : URL to photoset
"""
sets={}
if not self._connectToFlickr():
print("Couldn't connect to flickr")
return sets
psets = self.flickr.photosets_getList(user_id=myid)
for myset in psets.find('photosets').findall('photoset'):
key=myset.find('title').text
sets[key]={}
sets[key]['number_photos']=int(myset.attrib['photos'])
sets[key]['photo_id']=(myset.attrib['primary'])
sets[key]['id']=int(myset.attrib['id'])
sets[key]['url']='http://www.flickr.com/photos/%s/sets/%d/'\
%(myid,sets[key]['id'])
return sets | 0.012513 |
def rate(self):
"""Get the sample rate in Hz.
Returns
---------
rate : float
The sample rate, in Hz, calculated from the timestamps
"""
N = len(self.timestamps)
t = self.timestamps[-1] - self.timestamps[0]
rate = 1.0 * N / t
return rate | 0.01173 |
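
A small worked example of the calculation above on plain numbers (no class instance needed); note that it divides the full sample count, not the number of intervals, by the time span.

import numpy as np

timestamps = np.array([0.0, 0.1, 0.2, 0.3, 0.4])   # 5 samples, evenly spaced at 10 Hz
N = len(timestamps)
t = timestamps[-1] - timestamps[0]                  # 0.4 s span
print(1.0 * N / t)                                  # 12.5 -- i.e. N/(N-1) times the nominal 10 Hz
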
def get_address_coords(self, address):
''' Use the google geocoder to get latitude and longitude for an address string
Args:
address: any address string
Returns:
A tuple of (lat,lng)
'''
url = "https://maps.googleapis.com/maps/api/geocode/json?&address=" + address
r = requests.get(url)
r.raise_for_status()
results = r.json()['results']
lat = results[0]['geometry']['location']['lat']
lng = results[0]['geometry']['location']['lng']
return lat, lng | 0.007105 |
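
A hedged call sketch; the object name and address are placeholders, live network access is required, and in practice the address should be URL-encoded (and an API key appended) before being spliced into the geocode URL.

from urllib.parse import quote_plus    # assumption: Python 3; on Python 2 use urllib.quote_plus

address = "1600 Amphitheatre Parkway, Mountain View, CA"
print(quote_plus(address))             # safe form to embed in the geocode URL
# lat, lng = geocoder.get_address_coords(address)   # hypothetical instance of the class above
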
def _get_schema():
"""Get the schema for validation"""
schema_path = os.path.join(os.path.dirname(__file__),
'schema', 'scheduling_block_schema.json')
with open(schema_path, 'r') as file:
schema_data = file.read()
schema = json.loads(schema_data)
return schema | 0.005764 |
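
A hedged sketch of putting the loaded schema to use; the jsonschema dependency and the candidate block are assumptions, not necessarily how the surrounding module performs validation.

import jsonschema   # assumed third-party validator, not shown in the snippet above

schema = _get_schema()
candidate_block = {"id": "sbi-001"}    # illustrative (likely incomplete) scheduling-block config
try:
    jsonschema.validate(candidate_block, schema)
except jsonschema.ValidationError as err:
    print("invalid block:", err.message)
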
def metadata(self):
"""Retrieves metadata about the bucket.
Returns:
A BucketMetadata instance with information about this bucket.
Raises:
Exception if there was an error requesting the bucket's metadata.
"""
if self._info is None:
try:
self._info = self._api.buckets_get(self._name)
except Exception as e:
raise e
return BucketMetadata(self._info) if self._info else None | 0.006849 |
def find(self, strSeq) :
"""returns the first occurence of strSeq in self. Takes polymorphisms into account"""
arr = self.encode(strSeq)
return self._kmp_find(arr[0], self) | 0.036458 |
def load_results_from_table_definition(table_definition, table_definition_file, options):
"""
Load all results in files that are listed in the given table-definition file.
@return: a list of RunSetResult objects
"""
default_columns = extract_columns_from_table_definition_file(table_definition, table_definition_file)
columns_relevant_for_diff = _get_columns_relevant_for_diff(default_columns)
results = []
for tag in table_definition:
if tag.tag == 'result':
columns = extract_columns_from_table_definition_file(tag, table_definition_file) or default_columns
run_set_id = tag.get('id')
for resultsFile in get_file_list_from_result_tag(tag, table_definition_file):
results.append(parallel.submit(
load_result, resultsFile, options, run_set_id, columns, columns_relevant_for_diff))
elif tag.tag == 'union':
results.append(parallel.submit(
handle_union_tag, tag, table_definition_file, options, default_columns, columns_relevant_for_diff))
return [future.result() for future in results] | 0.007024 |
def bias_field_correction(
fmr,
fimout = '',
outpath = '',
fcomment = '_N4bias',
executable = '',
exe_options = [],
sitk_image_mask = True,
verbose = False,):
''' Correct for bias field in MR image(s) given in <fmr> as a string
(single file) or as a list of strings (multiple files).
Output dictionary with the bias corrected file names.
Options:
- fimout: The name (with path) of the output file. It's
ignored when multiple files are given as input. If
given for a single file name, the <outpath> and
<fcomment> options are ignored.
- outpath: Path to the output folder
- fcomment: A prefix comment to the file name
- executable: The path to the executable, overrides the above
choice of software; if 'sitk' is given instead
of the path, the Python module SimpleITK will be
used if it is available.
- exe_options: Options for the executable in the form of a list of
strings.
- sitk_image_mask: Image masking will be used if SimpleITK is
chosen.
'''
if executable=='sitk' and not 'SimpleITK' in sys.modules:
        print 'e> the SimpleITK module is required for bias correction' \
            +' and needs to be installed first.\n' \
            +' Install the module by:\n' \
            +' conda install -c https://conda.anaconda.org/simpleitk SimpleITK=1.2.0\n' \
            +' or pip install SimpleITK'
return None
#---------------------------------------------------------------------------
# INPUT
#---------------------------------------------------------------------------
#> path to a single file
if isinstance(fmr, basestring) and os.path.isfile(fmr):
fins = [fmr]
#> list of file paths
elif isinstance(fmr, list) and all([os.path.isfile(f) for f in fmr]):
fins = fmr
print 'i> multiple input files => ignoring the single output file name.'
fimout = ''
#> path to a folder
elif isinstance(fmr, basestring) and os.path.isdir(fmr):
fins = [os.path.join(fmr, f) for f in os.listdir(fmr) if f.endswith(('.nii', '.nii.gz'))]
print 'i> multiple input files from input folder.'
fimout = ''
else:
raise ValueError('could not decode the input of floating images.')
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# OUTPUT
#---------------------------------------------------------------------------
#> output path
if outpath=='' and fimout!='':
opth = os.path.dirname(fimout)
if opth=='':
opth = os.path.dirname(fmr)
fimout = os.path.join(opth, fimout)
n4opth = opth
fcomment = ''
    elif outpath=='':
        # use the folder of the (first) input image; works for single files, lists and folders
        opth = os.path.dirname(fins[0])
#> N4 bias correction specific folder
n4opth = os.path.join(opth, 'N4bias')
else:
opth = outpath
#> N4 bias correction specific folder
n4opth = os.path.join(opth, 'N4bias')
imio.create_dir(n4opth)
outdct = {}
#---------------------------------------------------------------------------
for fin in fins:
if verbose:
print 'i> input for bias correction:\n', fin
        # split path (also needed below for the SimpleITK mask file name)
        fspl = os.path.split(fin)
        if fimout=='':
            # N4 bias correction file output paths
            fn4 = os.path.join( n4opth, fspl[1].split('.nii')[0]\
                    + fcomment +'.nii.gz')
        else:
            fn4 = fimout
if not os.path.exists(fn4):
if executable=='sitk':
#==============================================
# SimpleITK Bias field correction for T1 and T2
#==============================================
#> initialise the corrector
corrector = sitk.N4BiasFieldCorrectionImageFilter()
                # numberFittingLevels = 4
# read input file
im = sitk.ReadImage(fin)
#> create a object specific mask
fmsk = os.path.join( n4opth, fspl[1].split('.nii')[0] +'_sitk_mask.nii.gz')
msk = sitk.OtsuThreshold(im, 0, 1, 200)
sitk.WriteImage(msk, fmsk)
#> cast to 32-bit float
im = sitk.Cast( im, sitk.sitkFloat32 )
#-------------------------------------------
print 'i> correcting bias field for', fin
n4out = corrector.Execute(im, msk)
sitk.WriteImage(n4out, fn4)
#-------------------------------------------
if sitk_image_mask:
if not 'fmsk' in outdct: outdct['fmsk'] = []
outdct['fmsk'].append(fmsk)
elif os.path.basename(executable)=='N4BiasFieldCorrection' \
and os.path.isfile(executable):
cmd = [executable, '-i', fin, '-o', fn4]
if verbose and os.path.basename(executable)=='N4BiasFieldCorrection':
cmd.extend(['-v', '1'])
cmd.extend(exe_options)
call(cmd)
if 'command' not in outdct:
outdct['command'] = []
outdct['command'].append(cmd)
elif os.path.isfile(executable):
cmd = [executable]
cmd.extend(exe_options)
call(cmd)
if 'command' not in outdct:
outdct['command'] = cmd
else:
            print ' N4 bias-corrected file already exists.'
#> output to dictionary
if not 'fim' in outdct: outdct['fim'] = []
outdct['fim'].append(fn4)
return outdct | 0.014456 |
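
A hedged call sketch; the paths are placeholders, SimpleITK must be installed for the 'sitk' backend, and the returned keys follow the dictionary built above.

# Hypothetical invocation (paths are placeholders):
# out = bias_field_correction('subject_T1w.nii.gz',
#                             outpath='/data/derived',
#                             executable='sitk',
#                             verbose=True)
# out['fim']   # -> list with the bias-corrected image path(s)
# out['fmsk']  # -> list with the Otsu mask(s) written by the SimpleITK branch
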
def get_value(self, key):
# type: (str) -> Any
"""Get a value from the configuration.
"""
try:
return self._dictionary[key]
except KeyError:
raise ConfigurationError("No such key - {}".format(key)) | 0.011494 |
def modify_image_attribute(self, image_id, attribute='launchPermission',
operation='add', user_ids=None, groups=None,
product_codes=None):
"""
Changes an attribute of an image.
:type image_id: string
:param image_id: The image id you wish to change
:type attribute: string
:param attribute: The attribute you wish to change
:type operation: string
:param operation: Either add or remove (this is required for changing
launchPermissions)
:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes
:type groups: list
:param groups: The groups to add/remove attributes
:type product_codes: list
:param product_codes: Amazon DevPay product code. Currently only one
product code can be associated with an AMI. Once
set, the product code cannot be changed or reset.
"""
params = {'ImageId' : image_id,
'Attribute' : attribute,
'OperationType' : operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
self.build_list_params(params, groups, 'UserGroup')
if product_codes:
self.build_list_params(params, product_codes, 'ProductCode')
return self.get_status('ModifyImageAttribute', params, verb='POST') | 0.004554 |
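
A hedged usage sketch against the classic boto EC2 connection; the AMI ID and account ID are placeholders.

import boto

conn = boto.connect_ec2()   # credentials resolved from the environment / boto config
# Share a private AMI with another AWS account:
conn.modify_image_attribute('ami-12345678',
                            attribute='launchPermission',
                            operation='add',
                            user_ids=['111122223333'])
# Or make it public:
# conn.modify_image_attribute('ami-12345678', operation='add', groups=['all'])
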
def xmoe_tr_dense_2k():
"""Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
"""
hparams = mtf_transformer2.mtf_bitransformer_base()
hparams.encoder_layers = ["self_att", "drd"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
hparams.batch_size = 64
hparams.shared_embedding_and_softmax_weights = True
hparams.mesh_shape = "batch:8"
return hparams | 0.019355 |
def get_endpoints(self, start=0, count=-1, filter='', sort=''):
"""
Gets a list of endpoints in a SAN.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of endpoints.
"""
uri = "{}/endpoints/".format(self.data["uri"])
return self._helper.get_all(start, count, filter=filter, sort=sort, uri=uri) | 0.007772 |
def wrap(self, availWidth, availHeight):
"""
All table properties should be known by now.
"""
widths = (availWidth - self.rightColumnWidth,
self.rightColumnWidth)
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0, 'Placeholder for table of contents', 0)]
else:
_tempEntries = self._lastEntries
lastMargin = 0
tableData = []
tableStyle = [
('VALIGN', (0, 0), (- 1, - 1), 'TOP'),
('LEFTPADDING', (0, 0), (- 1, - 1), 0),
('RIGHTPADDING', (0, 0), (- 1, - 1), 0),
('TOPPADDING', (0, 0), (- 1, - 1), 0),
('BOTTOMPADDING', (0, 0), (- 1, - 1), 0),
]
for i, entry in enumerate(_tempEntries):
level, text, pageNum = entry[:3]
leftColStyle = self.levelStyles[level]
if i: # Not for first element
tableStyle.append((
'TOPPADDING',
(0, i), (- 1, i),
max(lastMargin, leftColStyle.spaceBefore)))
# print leftColStyle.leftIndent
lastMargin = leftColStyle.spaceAfter
#right col style is right aligned
rightColStyle = ParagraphStyle(name='leftColLevel%d' % level,
parent=leftColStyle,
leftIndent=0,
alignment=TA_RIGHT)
leftPara = Paragraph(text, leftColStyle)
rightPara = Paragraph(str(pageNum), rightColStyle)
tableData.append([leftPara, rightPara])
self._table = Table(
tableData,
colWidths=widths,
style=TableStyle(tableStyle))
self.width, self.height = self._table.wrapOn(self.canv, availWidth, availHeight)
return self.width, self.height | 0.001896 |
def _cached_path_needs_update(ca_path, cache_length):
"""
Checks to see if a cache file needs to be refreshed
:param ca_path:
A unicode string of the path to the cache file
:param cache_length:
An integer representing the number of hours the cache is valid for
:return:
A boolean - True if the cache needs to be updated, False if the file
is up-to-date
"""
exists = os.path.exists(ca_path)
if not exists:
return True
stats = os.stat(ca_path)
if stats.st_mtime < time.time() - cache_length * 60 * 60:
return True
if stats.st_size == 0:
return True
return False | 0.00149 |
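
A small usage sketch; the path and the 24-hour validity window are illustrative.

ca_path = "/tmp/cacert.pem"                    # hypothetical cache location
if _cached_path_needs_update(ca_path, 24):     # stale after 24 h, or missing, or zero-length
    pass                                       # ...re-fetch and rewrite the CA bundle here...
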
def today(self) -> datetime:
""" Returns today (date only) as datetime """
self.value = datetime.combine(datetime.today().date(), time.min)
return self.value | 0.01105 |
def confirm_email(self):
""" Confirm email """
if self._email and self.email_new:
self._email = self.email_new
self.email_confirmed = True
self.email_link = None
self.email_new = None
self.email_link_expires = None | 0.007273 |
def get(self, sid):
"""
Constructs a ParticipantContext
:param sid: The sid
:returns: twilio.rest.video.v1.room.room_participant.ParticipantContext
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantContext
"""
return ParticipantContext(self._version, room_sid=self._solution['room_sid'], sid=sid, ) | 0.008152 |
def play(self):
""" Play Conway's Game of Life. """
# Write the initial configuration to file.
self.t = 1 # Current time level
while self.t <= self.T: # Evolve!
# print( "At time level %d" % t)
# Loop over each cell of the grid and apply Conway's rules.
for i in range(self.N):
for j in range(self.N):
live = self.live_neighbours(i, j)
if (self.old_grid[i][j] == 1 and live < 2):
self.new_grid[i][j] = 0 # Dead from starvation.
elif (self.old_grid[i][j] == 1 and (live == 2 or live == 3)):
self.new_grid[i][j] = 1 # Continue living.
elif (self.old_grid[i][j] == 1 and live > 3):
self.new_grid[i][j] = 0 # Dead from overcrowding.
elif (self.old_grid[i][j] == 0 and live == 3):
self.new_grid[i][j] = 1 # Alive from reproduction.
# Output the new configuration.
# The new configuration becomes the old configuration for the next generation.
self.old_grid = self.new_grid.copy()
self.draw_board()
# Move on to the next time level
self.t += 1 | 0.003077 |
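
The per-cell update applied in the nested loop above can be restated compactly; this helper is a sketch for clarity, not part of the original class.

def next_state(alive, live_neighbours):
    """Conway's rule: a live cell survives with 2-3 neighbours; a dead cell is born with exactly 3."""
    if alive:
        return 1 if live_neighbours in (2, 3) else 0
    return 1 if live_neighbours == 3 else 0
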
def unregister_vm(vm_ref):
'''
    Unregisters the virtual machine from the inventory (its files are not deleted)
vm_ref
Managed object reference of a virtual machine object
'''
vm_name = get_managed_object_name(vm_ref)
log.trace('Destroying vm \'%s\'', vm_name)
try:
vm_ref.UnregisterVM()
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
raise salt.exceptions.VMwareRuntimeError(exc.msg) | 0.001462 |
def setupRenderModels(self):
"Purpose: Create/destroy GL Render Models"
self.m_rTrackedDeviceToRenderModel = [None] * openvr.k_unMaxTrackedDeviceCount
if self.m_pHMD is None:
return
for unTrackedDevice in range(openvr.k_unTrackedDeviceIndex_Hmd + 1, openvr.k_unMaxTrackedDeviceCount):
if not self.m_pHMD.isTrackedDeviceConnected( unTrackedDevice ):
continue
self.setupRenderModelForTrackedDevice( unTrackedDevice ) | 0.01581 |
def dilate(self, size):
"""
Dilate a region using morphological operators.
Parameters
----------
size : int
Size of dilation in pixels
"""
if size > 0:
from scipy.ndimage.morphology import binary_dilation
size = (size * 2) + 1
coords = self.coordinates
tmp = zeros(self.extent + size * 2)
coords = (coords - self.bbox[0:len(self.center)] + size)
tmp[coords.T.tolist()] = 1
tmp = binary_dilation(tmp, ones((size, size)))
new = asarray(where(tmp)).T + self.bbox[0:len(self.center)] - size
new = [c for c in new if all(c >= 0)]
else:
return self
return one(new) | 0.003886 |
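
A standalone illustration of the morphological step used above (scipy's binary_dilation with a square structuring element); the toy array is an assumption.

import numpy as np
from scipy.ndimage.morphology import binary_dilation

mask = np.zeros((5, 5), dtype=bool)
mask[2, 2] = True
grown = binary_dilation(mask, np.ones((3, 3)))   # size=1 above maps to a 3x3 kernel
print(int(grown.sum()))                          # 9 -- the single pixel grows into a 3x3 block
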
def plot_sector_exposures_longshort(long_exposures, short_exposures,
sector_dict=SECTORS, ax=None):
"""
Plots outputs of compute_sector_exposures as area charts
Parameters
----------
long_exposures, short_exposures : arrays
Arrays of long and short sector exposures (output of
compute_sector_exposures).
sector_dict : dict or OrderedDict
Dictionary of all sectors
- See full description in compute_sector_exposures
"""
if ax is None:
ax = plt.gca()
if sector_dict is None:
sector_names = SECTORS.values()
else:
sector_names = sector_dict.values()
color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 11))
ax.stackplot(long_exposures[0].index, long_exposures,
labels=sector_names, colors=color_list, alpha=0.8,
baseline='zero')
ax.stackplot(long_exposures[0].index, short_exposures,
colors=color_list, alpha=0.8, baseline='zero')
ax.axhline(0, color='k', linestyle='-')
ax.set(title='Long and short exposures to sectors',
ylabel='Proportion of long/short exposure in sectors')
ax.legend(loc='upper left', frameon=True, framealpha=0.5)
return ax | 0.000787 |
def _augment_observation(self, ob, reward, cumulative_reward):
""""Expand observation array with additional information header (top rows).
Args:
ob: observation
reward: reward to be included in header.
cumulative_reward: total cumulated reward to be included in header.
Returns:
Expanded observation array.
"""
img = PIL_Image().new("RGB",
(ob.shape[1], self.HEADER_HEIGHT,))
draw = PIL_ImageDraw().Draw(img)
draw.text(
(1, 0), "c:{:3}, r:{:3}".format(int(cumulative_reward), int(reward)),
fill=(255, 0, 0)
)
draw.text(
(1, 15), "fc:{:3}".format(int(self._frame_counter)),
fill=(255, 0, 0)
)
header = np.asarray(img)
del img
header.setflags(write=1)
# Top row color indicates if WAIT MODE is on.
if self._wait:
pixel_fill = (0, 255, 0)
else:
pixel_fill = (255, 0, 0)
header[0, :, :] = pixel_fill
return np.concatenate([header, ob], axis=0) | 0.002979 |
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data) | 0.006515 |
def _claskey(obj, style):
'''Wrap an old- or new-style class object.
'''
i = id(obj)
k = _claskeys.get(i, None)
if not k:
_claskeys[i] = k = _Claskey(obj, style)
return k | 0.009852 |