text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def check(self, inst, value):
"""Raise TypeError if value doesn't satisfy the constraints
for use on instance inst.
"""
if not (self.or_none and value is None):
if self.seq:
self.checktype_seq(value, self.kind,
unique=self.unique, inst=inst)
else:
self.checktype(value, self.kind, inst=inst) | 0.004831 |
def _write_log(self):
"""
Write log info
"""
logger.info("Writing config file %s" % self.path)
if settings.DEBUG:
try:
old_content = open(self.path, 'r').readlines()
except IOError:
old_content = []
new_content = self.contents().splitlines(True)
# unified_diff returns a generator; materialize it so `if diff:` is meaningful
diff = list(difflib.unified_diff(old_content, new_content,
fromfile=self.path, tofile=self.path))
if diff:
logger.debug('Diff:\n' + ''.join(diff))
else:
logger.debug('File not changed') | 0.00311 |
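For reference, a minimal standalone sketch of the same difflib pattern (the file names and contents here are illustrative only):

import difflib

old_lines = ["a = 1\n", "b = 2\n"]
new_lines = ["a = 1\n", "b = 3\n"]
# unified_diff returns a generator; materialize it before testing truthiness.
diff = list(difflib.unified_diff(old_lines, new_lines,
                                 fromfile="settings.cfg", tofile="settings.cfg"))
if diff:
    print("".join(diff))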
def save_model(self, request, page, form, change):
"""Move the page in the tree if necessary and save every
placeholder :class:`Content <pages.models.Content>`.
"""
language = form.cleaned_data['language']
target = form.data.get('target', None)
position = form.data.get('position', None)
page.save()
# if a target and position were supplied, we need to move the page
if target and position:
try:
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
pass
else:
target.invalidate()
page.move_to(target, position)
for name in self.mandatory_placeholders:
data = form.cleaned_data[name]
placeholder = PlaceholderNode(name)
extra_data = placeholder.get_extra_data(form.data)
placeholder.save(page, language, data, change,
extra_data=extra_data)
for placeholder in get_placeholders(page.get_template()):
if(placeholder.ctype in form.cleaned_data and placeholder.ctype
not in self.mandatory_placeholders):
data = form.cleaned_data[placeholder.ctype]
extra_data = placeholder.get_extra_data(form.data)
placeholder.save(page, language, data, change,
extra_data=extra_data)
page.invalidate() | 0.002786 |
def set_alembic_revision(path=None):
"""Create/Update alembic table to latest revision number"""
config = Config()
try:
config.set_main_option("script_location", path or "migrations")
script = ScriptDirectory.from_config(config)
head = script.get_current_head()
# create alembic table
metadata, alembic_version = alembic_table_metadata()
metadata.create_all()
item = manager.db.session.query(alembic_version).first()
if item and item.version_num != head:
item.version_num = head
else:
item = alembic_version.insert().values(version_num=head)
item.compile()
conn = manager.db.engine.connect()
conn.execute(item)
manager.db.session.commit()
stdout.write("alembic head is set to %s \n" % head)
except CommandError as e:
stdout.write(e.message) | 0.001096 |
def load_observations((observations, regex, rename), path, filenames):
"""
Populates a provisional-name-based dictionary of observations of the object.
Each observation is keyed on the date, i.e. a dictionary of dictionaries.
:rtype : None
:param observations: dictionary to store observations into
:param path: the directory where filenames are.
:param filenames: list of files in path.
"""
for filename in filenames:
if re.search(regex, filename) is None:
logging.warning("Skipping {}".format(filename))
continue
print os.path.join(path,filename)
obs = mpc.MPCReader().read(os.path.join(path,filename))
for ob in obs:
if rename:
new_provisional_name = os.path.basename(filename)
new_provisional_name = new_provisional_name[0:new_provisional_name.find(".")]
ob.provisional_name = new_provisional_name
# ob.comment.name = new_provisional_name
key1 = "{:.5f}".format(ob.date.mjd)
key2 = ob.provisional_name
if key1 not in observations:
observations[key1] = {}
if key2 in observations[key1]:
if observations[key1][key2]:
continue
if not ob.null_observation:
logger.error(filename)
logger.error(str(ob))
logger.error(str(observations[key1][key2]))
raise ValueError("conflicting observations for {} in {}".format(key2, key1))
observations[key1][key2] = ob | 0.009219 |
def store_data(self, data, encoding='utf-8'):
"""Put the given content into a file, possibly encoding it as UTF-8
in the process."""
path = random_filename(self.work_path)
try:
with open(path, 'wb') as fh:
if isinstance(data, str):
data = data.encode(encoding)
if data is not None:
fh.write(data)
return self.store_file(path)
finally:
try:
os.unlink(path)
except OSError:
pass | 0.003521 |
def _partition(entity, sep):
"""Python2.4 doesn't have a partition method so we provide
our own that mimics str.partition from later releases.
Split the string at the first occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing the string itself, followed
by two empty strings.
"""
parts = entity.split(sep, 1)
if len(parts) == 2:
return parts[0], sep, parts[1]
else:
return entity, '', '' | 0.001689 |
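A quick illustrative check of the fallback's behaviour (hypothetical standalone calls):

# Separator present: behaves like str.partition.
assert _partition("key=value", "=") == ("key", "=", "value")
# Separator absent: the original string plus two empty strings.
assert _partition("plainstring", "=") == ("plainstring", "", "")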
def encode(self, value):
'''
:param value: value to encode
'''
kassert.is_of_types(value, Bits)
remainder = len(value) % 8
if remainder:
value += Bits(bin='0' * (8 - remainder))
return value | 0.007752 |
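A minimal sketch of the same zero-padding logic using a plain '0'/'1' string instead of the Bits type assumed above:

def pad_to_byte_boundary(bits):
    """Pad a string of '0'/'1' characters with zeros up to a multiple of 8."""
    remainder = len(bits) % 8
    if remainder:
        bits += "0" * (8 - remainder)
    return bits

assert len(pad_to_byte_boundary("10110")) == 8
assert pad_to_byte_boundary("10101010") == "10101010"  # already byte-aligned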
def get_object(self, queryset=None):
"""
Implement cache on ``get_object`` method to
avoid repetitive calls, in POST.
"""
if self._cached_object is None:
self._cached_object = super(EntryCacheMixin, self).get_object(
queryset)
return self._cached_object | 0.006079 |
def phisheye_term_list(self, include_inactive=False, **kwargs):
"""Provides a list of terms that are set up for this account.
This call is not charged against your API usage limit.
NOTE: The terms must be configured in the PhishEye web interface: https://research.domaintools.com/phisheye.
There is no API call to set up the terms.
"""
return self._results('phisheye_term_list', '/v1/phisheye/term-list', include_inactive=include_inactive,
items_path=('terms', ), **kwargs) | 0.007067 |
def dump_guest_core(self, filename, compression):
"""Takes a core dump of the guest.
See include/VBox/dbgfcorefmt.h for details on the file format.
in filename of type str
The name of the output file. The file must not exist.
in compression of type str
Reserved for future compression method indicator.
"""
if not isinstance(filename, basestring):
raise TypeError("filename can only be an instance of type basestring")
if not isinstance(compression, basestring):
raise TypeError("compression can only be an instance of type basestring")
self._call("dumpGuestCore",
in_p=[filename, compression]) | 0.008119 |
def _send_success_response(self, response, start_response):
"""Sends an HTTP 200 json success response.
This calls start_response and returns the response body.
Args:
response: A string containing the response body to return.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the response body.
"""
headers = [('Content-Type', 'application/json; charset=UTF-8')]
return util.send_wsgi_response('200 OK', headers, response, start_response) | 0.001931 |
def check(self, return_code=0):
"""Run command with arguments. Wait for command to complete. If the
exit code was as expected and there is no exception then return,
otherwise raise EasyProcessError.
:param return_code: int, expected return code
:rtype: self
"""
ret = self.call().return_code
ok = ret == return_code
if not ok:
raise EasyProcessError(
self, 'check error, return code is not {0}!'.format(return_code))
return self | 0.005576 |
def request_acquisition(self, acquisition_request):
"""RequestAcquisition.
[Preview API]
:param :class:`<ExtensionAcquisitionRequest> <azure.devops.v5_1.gallery.models.ExtensionAcquisitionRequest>` acquisition_request:
:rtype: :class:`<ExtensionAcquisitionRequest> <azure.devops.v5_1.gallery.models.ExtensionAcquisitionRequest>`
"""
content = self._serialize.body(acquisition_request, 'ExtensionAcquisitionRequest')
response = self._send(http_method='POST',
location_id='3adb1f2d-e328-446e-be73-9f6d98071c45',
version='5.1-preview.1',
content=content)
return self._deserialize('ExtensionAcquisitionRequest', response) | 0.007782 |
def flightmode_menu():
'''construct flightmode menu'''
modes = mestate.mlog.flightmode_list()
ret = []
idx = 0
for (mode,t1,t2) in modes:
modestr = "%s %us" % (mode, (t2-t1))
ret.append(MPMenuCheckbox(modestr, modestr, 'mode-%u' % idx))
idx += 1
mestate.flightmode_selections.append(False)
return ret | 0.008427 |
def build_args(cmd, src, dst):
"""
Build arguments list for passing to subprocess.call_check
:param cmd str: Command string to interpolate src and dst filepaths into.
Typically the output of `config.Config.uic_command` or `config.Config.rcc_command`.
:param src str: Source filepath.
:param dst str: Destination filepath.
"""
cmd = cmd % (quote(src), quote(dst))
args = shlex.split(cmd)
return [arg for arg in args if arg] | 0.006148 |
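An illustrative call, assuming a pyuic-style command template (the template string and paths are hypothetical; quote here is Python 3's shlex.quote):

import shlex
from shlex import quote

cmd_template = "pyuic5 %s -o %s"
cmd = cmd_template % (quote("ui/main window.ui"), quote("gen/main_ui.py"))
args = [arg for arg in shlex.split(cmd) if arg]
# -> ['pyuic5', 'ui/main window.ui', '-o', 'gen/main_ui.py']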
def translate_obj(self,d,fname):
"""
Translate a field value from a solr document.
This includes special logic for when the field value
denotes an object; in that case we nest it.
"""
if fname not in d:
# TODO: consider adding arg for failure on null
return None
lf = M.label_field(fname)
id = d[fname]
id = self.make_canonical_identifier(id)
#if id.startswith('MGI:MGI:'):
# id = id.replace('MGI:MGI:','MGI:')
obj = {'id': id}
if id:
if self._use_amigo_schema(self.object_category):
iri = expand_uri(id)
else:
iri = expand_uri(id, [get_curie_map('{}/cypher/curies'.format(self.config.scigraph_data.url))])
obj['iri'] = iri
if lf in d:
obj['label'] = d[lf]
cf = fname + "_category"
if cf in d:
obj['category'] = [d[cf]]
if 'aspect' in d and id.startswith('GO:'):
obj['category'] = [ASPECT_MAP[d['aspect']]]
del d['aspect']
return obj | 0.005367 |
def parse(self, data):
# type: (bytes) -> None
'''
Parse the passed in data into a UDF Long AD.
Parameters:
data - The data to parse.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Long Allocation descriptor already initialized')
(self.extent_length, self.log_block_num, self.part_ref_num,
self.impl_use) = struct.unpack_from(self.FMT, data, 0)
self._initialized = True | 0.007576 |
def UpdateNumberOfWarnings(
self, number_of_consumed_warnings, number_of_produced_warnings):
"""Updates the number of warnings.
Args:
number_of_consumed_warnings (int): total number of warnings consumed by
the process.
number_of_produced_warnings (int): total number of warnings produced by
the process.
Returns:
bool: True if either number of warnings has increased.
Raises:
ValueError: if the consumed or produced number of warnings is smaller
than the value of the previous update.
"""
consumed_warnings_delta = 0
if number_of_consumed_warnings is not None:
if number_of_consumed_warnings < self.number_of_consumed_warnings:
raise ValueError(
'Number of consumed warnings smaller than previous update.')
consumed_warnings_delta = (
number_of_consumed_warnings - self.number_of_consumed_warnings)
self.number_of_consumed_warnings = number_of_consumed_warnings
self.number_of_consumed_warnings_delta = consumed_warnings_delta
produced_warnings_delta = 0
if number_of_produced_warnings is not None:
if number_of_produced_warnings < self.number_of_produced_warnings:
raise ValueError(
'Number of produced warnings smaller than previous update.')
produced_warnings_delta = (
number_of_produced_warnings - self.number_of_produced_warnings)
self.number_of_produced_warnings = number_of_produced_warnings
self.number_of_produced_warnings_delta = produced_warnings_delta
return consumed_warnings_delta > 0 or produced_warnings_delta > 0 | 0.005464 |
def update(self, iterations=1, item_id=None, force_flush=False):
"""
Updates the progress bar / percentage indicator.
Parameters
----------
iterations : int (default: 1)
default argument can be changed to integer values
>=1 in order to update the progress indicators more than once
per iteration.
item_id : str (default: None)
Print an item_id string behind the progress bar
force_flush : bool (default: False)
If True, flushes the progress indicator to the output screen
in each iteration.
"""
self.item_id = item_id
self.cnt += iterations
self._print(force_flush=force_flush)
self._finish() | 0.002628 |
def __request_except(self, requestId, exc, set_and_forget=True):
"""Set exception (if not None) for the given request and (optionally) remove from internal cache & setting its
event"""
try:
with self.__requests:
if set_and_forget:
req = self.__requests.pop(requestId)
else:
req = self.__requests[requestId]
except KeyError:
logger.error('Unknown request %s - cannot set exception', requestId)
else:
if exc is not None:
req.exception = exc
if set_and_forget:
req._set() | 0.006033 |
def _tValueForPointOnCubicCurve(point, cubicCurve, isHorizontal=0):
"""
Finds a t value on a curve from a point.
The point must originally be a point on the curve.
This will only back trace the t value, needed to split the curve in parts
"""
pt1, pt2, pt3, pt4 = cubicCurve
a, b, c, d = bezierTools.calcCubicParameters(pt1, pt2, pt3, pt4)
solutions = bezierTools.solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
d[isHorizontal] - point[isHorizontal])
solutions = [t for t in solutions if 0 <= t < 1]
if not solutions and not isHorizontal:
# it can happen that a horizontal line doesn't intersect; try the vertical
return _tValueForPointOnCubicCurve(point, (pt1, pt2, pt3, pt4), isHorizontal=1)
if len(solutions) > 1:
intersectionLengths = {}
for t in solutions:
tp = _getCubicPoint(t, pt1, pt2, pt3, pt4)
dist = _distance(tp, point)
intersectionLengths[dist] = t
minDist = min(intersectionLengths.keys())
solutions = [intersectionLengths[minDist]]
return solutions | 0.003587 |
def write_metrics_file(metrics: List[Dict[str, Any]], path: str):
"""
Write metrics data to tab-separated file.
:param metrics: metrics data.
:param path: Path to write to.
"""
with open(path, 'w') as metrics_out:
for checkpoint, metric_dict in enumerate(metrics, 1):
metrics_str = "\t".join(["{}={}".format(name, value) for name, value in sorted(metric_dict.items())])
metrics_out.write("{}\t{}\n".format(checkpoint, metrics_str)) | 0.004098 |
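For example, a call like the following (hypothetical metric names and values) writes one tab-separated line per checkpoint, with metrics sorted by name:

metrics = [{"bleu": 0.31, "ppl": 12.4}, {"bleu": 0.33, "ppl": 11.8}]
write_metrics_file(metrics, "metrics.tsv")
# metrics.tsv then contains:
#   "1\tbleu=0.31\tppl=12.4\n"
#   "2\tbleu=0.33\tppl=11.8\n"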
def _simulate_measurement(self, op: ops.Operation, data: _StateAndBuffer,
indices: List[int], measurements: Dict[str, List[bool]],
num_qubits: int) -> None:
"""Simulate an op that is a measurement in the computataional basis."""
meas = ops.op_gate_of_type(op, ops.MeasurementGate)
# TODO: support measurement outside computational basis.
if meas:
invert_mask = meas.invert_mask or num_qubits * (False,)
# Measure updates inline.
bits, _ = wave_function.measure_state_vector(data.state,
indices,
data.state)
corrected = [bit ^ mask for bit, mask in
zip(bits, invert_mask)]
key = protocols.measurement_key(meas)
measurements[key].extend(corrected) | 0.004386 |
def transformed_values(self):
'''Return dictionary of transformed results, with keys being output names.
Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize values.
'''
if self.success and self.transforms:
with self.reconstruct_context() as context:
values = {
result.step_output_data.output_name: self._get_value(
context, result.step_output_data
)
for result in self.transforms
if result.is_successful_output
}
return values
else:
return None | 0.004213 |
def normalize(self):
"""
Normalize all values of this time series, in place, to the range 0 to 1.
:return: `None`
"""
maximum = self.max()
if maximum:
self.values = [value / maximum for value in self.values] | 0.007752 |
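A small worked example of the in-place scaling (hypothetical series values):

values = [2.0, 4.0, 8.0]
maximum = max(values)
if maximum:
    values = [value / maximum for value in values]
# -> [0.25, 0.5, 1.0]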
def bool_assignment(arg, patterns=None):
"""
Summary:
Enforces correct bool argument assignment
Arg:
:arg (*): arg which must be interpreted as either bool True or False
Returns:
bool assignment | TYPE: bool
"""
arg = str(arg) # only eval type str
try:
if patterns is None:
patterns = (
(re.compile(r'^(true|false)$', flags=re.IGNORECASE), lambda x: x.lower() == 'true'),
(re.compile(r'^(yes|no)$', flags=re.IGNORECASE), lambda x: x.lower() == 'yes'),
(re.compile(r'^(y|n)$', flags=re.IGNORECASE), lambda x: x.lower() == 'y')
)
if not arg:
return '' # default selected
else:
for pattern, func in patterns:
if pattern.match(arg):
return func(arg)
except Exception as e:
raise e | 0.004435 |
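Illustrative calls based on the default patterns above:

assert bool_assignment("True") is True    # true/false pattern
assert bool_assignment("no") is False     # yes/no pattern
assert bool_assignment("Y") is True       # y/n pattern
assert bool_assignment("") == ""          # empty arg returns the default ''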
def multiget(self, keys, r=None, pr=None, timeout=None,
basic_quorum=None, notfound_ok=None,
head_only=False):
"""
Retrieves a list of keys belonging to this bucket in parallel.
:param keys: the keys to fetch
:type keys: list
:param r: R-Value for the requests (defaults to bucket's R)
:type r: integer
:param pr: PR-Value for the requests (defaults to bucket's PR)
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
"""
bkeys = [(self.bucket_type.name, self.name, key) for key in keys]
return self._client.multiget(bkeys, r=r, pr=pr, timeout=timeout,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok,
head_only=head_only) | 0.002639 |
def pyenv():
'''Install or update the pyenv python environment.
Check out or update the pyenv repo at ~/.pyenv and enable pyenv.
Pyenv is thus "installed" as a GitHub repo under ~/.pyenv.
More info:
* https://github.com/yyuu/pyenv
* https://github.com/yyuu/pyenv/wiki/Common-build-problems#requirements
Tutorial:
* http://amaral-lab.org/resources/guides/pyenv-tutorial
'''
install_packages([
'make',
'build-essential',
'libssl-dev',
'zlib1g-dev',
'libbz2-dev',
'libreadline-dev',
'libsqlite3-dev',
'wget',
'curl',
'llvm',
'libncurses5-dev',
'libncursesw5-dev',
])
if exists('~/.pyenv'):
run('cd ~/.pyenv && git pull')
run('~/.pyenv/bin/pyenv update')
else:
run('curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/'
'master/bin/pyenv-installer | bash')
# add pyenv to $PATH and set up pyenv init
bash_snippet = '~/.bashrc_pyenv'
install_file_legacy(path=bash_snippet)
prefix = flo('if [ -f {bash_snippet} ]; ')
enabler = flo('if [ -f {bash_snippet} ]; then source {bash_snippet}; fi')
if env.host == 'localhost':
# FIXME: next function currently only works for localhost
uncomment_or_update_or_append_line(filename='~/.bashrc', prefix=prefix,
new_line=enabler)
else:
print(cyan('\nappend to ~/.bashrc:\n\n ') + enabler) | 0.000656 |
def load(self):
"""
Fetch data about tag
"""
tags = self.get_data("tags/%s" % self.name)
tag = tags['tag']
for attr in tag.keys():
setattr(self, attr, tag[attr])
return self | 0.00813 |
def mark_message_lines(lines):
"""Mark message lines with markers to distinguish quotation lines.
Markers:
* e - empty line
* m - line that starts with quotation marker '>'
* s - splitter line
* f - forwarded-message line (e.g. '---- Forwarded message ----')
* t - presumably lines from the last message in the conversation
>>> mark_message_lines(['answer', 'From: [email protected]', '', '> question'])
'tsem'
"""
markers = ['e' for _ in lines]
i = 0
while i < len(lines):
if not lines[i].strip():
markers[i] = 'e' # empty line
elif QUOT_PATTERN.match(lines[i]):
markers[i] = 'm' # line with quotation marker
elif RE_FWD.match(lines[i]):
markers[i] = 'f' # ---- Forwarded message ----
else:
# in case splitter is spread across several lines
splitter = is_splitter('\n'.join(lines[i:i + SPLITTER_MAX_LINES]))
if splitter:
# append as many splitter markers as lines in splitter
splitter_lines = splitter.group().splitlines()
for j in range(len(splitter_lines)):
markers[i + j] = 's'
# skip splitter lines
i += len(splitter_lines) - 1
else:
# probably the line from the last message in the conversation
markers[i] = 't'
i += 1
return ''.join(markers) | 0.000718 |
def _consfcn(self, x):
""" Evaluates nonlinear constraints and their Jacobian for OPF.
"""
h, g = self._gh(x)
dh, dg = self._dgh(x)
return h, g, dh, dg | 0.010417 |
def find_object_hashes(root, meta_only=False):
"""
Iterator that yields the hashes of all of the file and table nodes, plus any metadata hashes.
:param root: starting node
:param meta_only: if True, yield only metadata hashes
"""
stack = [root]
while stack:
obj = stack.pop()
if not meta_only and isinstance(obj, (TableNode, FileNode)):
for objhash in obj.hashes:
yield objhash
stack.extend(itervalues(obj.get_children()))
if obj.metadata_hash is not None:
yield obj.metadata_hash | 0.002024 |
def par_y0bstep(i):
r"""The parallel component of the step to minimise the augmented
Lagrangian with respect to :math:`\mathbf{y}_0`.
Parameters
----------
i : int
Index of grouping to update
"""
global mp_Y0
mp_Y0[i] = 1/mp_rho*mp_S + mp_DX[i] + mp_U0[i] + mp_b | 0.003311 |
def update(self, docs=None, split=0, lfs=None, parallelism=None, progress_bar=True):
"""Update the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the updated LFs to all the candidates
in these documents.
:param split: If docs is None, apply the updated LFs to the candidates
in this particular split.
:param lfs: A list of lists of labeling functions to update. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
"""
if lfs is None:
raise ValueError("Please provide a list of lists of labeling functions.")
if len(lfs) != len(self.candidate_classes):
raise ValueError("Please provide LFs for each candidate class.")
self.apply(
docs=docs,
split=split,
lfs=lfs,
train=True,
clear=False,
parallelism=parallelism,
progress_bar=progress_bar,
) | 0.003506 |
def launch_action_group(self, action_id):
"""Start action group."""
header = BASE_HEADERS.copy()
header['Cookie'] = self.__cookie
request = requests.get(
BASE_URL + 'launchActionGroup?oid=' +
action_id,
headers=header,
timeout=10)
if request.status_code != 200:
self.__logged_in = False
self.login()
self.launch_action_group(action_id)
return
try:
result = request.json()
except ValueError as error:
raise Exception(
"Not a valid result for launch "
"action group, protocol error: " +
str(request.status_code) + ' - ' + request.reason +
" (" + str(error) + ")")
if 'actionGroup' not in result.keys():
raise Exception(
"Could not launch action "
"group, missing execId.")
return result['actionGroup'][0]['execId'] | 0.001982 |
def disconnect(self, silent=False):
"""Send a 'disconnect' packet, so that the user knows it has been
disconnected (booted actually). This will trigger an onDisconnect()
call on the client side.
Over here, we will kill all ``spawn``ed processes and remove the
namespace from the Socket object.
:param silent: do not actually send the packet (if they asked for a
disconnect for example), but just kill all jobs spawned
by this Namespace, and remove it from the Socket.
"""
if not silent:
packet = {"type": "disconnect",
"endpoint": self.ns_name}
self.socket.send_packet(packet)
# remove_namespace might throw GreenletExit so
# kill_local_jobs must be in finally
try:
self.socket.remove_namespace(self.ns_name)
finally:
self.kill_local_jobs() | 0.002094 |
def is_http_running_on(port):
""" Check if an http server runs on a given port.
Args:
port: The port to check.
Returns:
True if it is used by an http server. False otherwise.
"""
try:
conn = httplib.HTTPConnection('127.0.0.1:' + str(port))
conn.connect()
conn.close()
return True
except Exception:
return False | 0.011594 |
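On Python 3, where httplib was renamed, the same check can be sketched with http.client:

import http.client

def is_http_running_on_py3(port):
    """Return True if something accepts TCP connections on 127.0.0.1:port."""
    try:
        conn = http.client.HTTPConnection("127.0.0.1", port, timeout=2)
        conn.connect()
        conn.close()
        return True
    except Exception:
        return False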
def weekday(cls, year, month, day):
"""Returns the weekday of the date. 0 = aaitabar"""
return NepDate.from_bs_date(year, month, day).weekday() | 0.012579 |
def file(self, md5=None, sha1=None, sha256=None, **kwargs):
"""Add File data to Batch object.
.. note:: At least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File.
"""
indicator_obj = File(md5, sha1, sha256, **kwargs)
return self._indicator(indicator_obj) | 0.003899 |
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
return ",".join([self.children[j].qasm(prec)
for j in range(self.size())]) | 0.010638 |
def answer(self):
""" Return the answer for the question from the validator.
This will ultimately only be called on the first validator if
multiple validators have been added.
"""
if isinstance(self.validator, list):
return self.validator[0].choice()
return self.validator.choice() | 0.005714 |
def get_setting(*args, **kwargs):
"""Get a setting and raise an appropriate user friendly error if
the setting is not found."""
for name in args:
if hasattr(settings, name):
return getattr(settings, name)
if kwargs.get('raise_error', False):
setting_url = url % args[0].lower().replace('_', '-')
raise ImproperlyConfigured('Please make sure you specified at '
'least one of these settings: %s \r\nDocumentation: %s'
% (args, setting_url))
return kwargs.get('default_value', None) | 0.005367 |
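Typical usage, with hypothetical setting names:

# Returns the first of the listed settings that is defined, or the default.
sender = get_setting("NEWSLETTER_FROM_EMAIL", "DEFAULT_FROM_EMAIL",
                     default_value="[email protected]")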
def key_press(self, key, x, y):
"Close the application when the player presses ESCAPE"
if ord(key) == 27:
# print "Escape!"
if bool(glutLeaveMainLoop):
glutLeaveMainLoop()
else:
raise Exception("Application quit") | 0.006579 |
async def generate_status(self, status, avatar=None):
"""Generate a discord status icon below the image provided.
This function is a coroutine.
Parameters:
status: str - a discord status, must be online, idle, dnd, or streaming
avatar: str - http/s url pointing to an avatar, has to have proper headers and be a direct link to an image
(Note, this url is encoded by the wrapper itself, so you don't have to worry about encoding it ;))
Return Type: image data"""
if not isinstance(status, str):
raise TypeError("type of 'status' must be str.")
if avatar and not isinstance(avatar, str):
raise TypeError("type of 'avatar' must be str.")
url = f'https://api.weeb.sh/auto-image/discord-status?status={status}' + (f'&avatar={urllib.parse.quote(avatar, safe="")}' if avatar else '')
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=self.__headers) as resp:
if resp.status == 200:
return await resp.read()
else:
raise Exception((await resp.json())['message']) | 0.004979 |
def online_time_to_string(value, timeFormat, utcOffset=0):
"""Converts AGOL timestamp to formatted string.
Args:
value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000)
timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`.
utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC).
Returns:
str: A string representation of the timestamp.
Examples:
>>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S")
'2016-03-05 00:41:01'
>>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00
'03/05/1993 12:35:15'
See Also:
:py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp
"""
try:
return datetime.datetime.fromtimestamp(value/1000 + utcOffset*3600).strftime(timeFormat)
except:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "online_time_to_string",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
pass | 0.007289 |
def getThirdPartyLibFiles(self, libs):
"""
Retrieves the list of library files for building against the Unreal-bundled versions of the specified third-party libraries
"""
platformDefaults = True
if libs[0] == '--nodefaults':
platformDefaults = False
libs = libs[1:]
details = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults)
return details.getLibraryFiles(self.getEngineRoot(), delimiter='\n') | 0.034247 |
def do_set(parser, token):
'''Calls an arbitrary method on an object.'''
code = token.contents
firstspace = code.find(' ')
if firstspace >= 0:
code = code[firstspace+1:]
return Setter(code) | 0.029557 |
def infer(args):
"""
%prog infer scaffolds.fasta genome.fasta
Infer where the components are in the genome. This function is rarely used,
but can be useful when distributor does not ship an AGP file.
"""
from jcvi.apps.grid import WriteJobs
from jcvi.formats.bed import sort
p = OptionParser(infer.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
scaffoldsf, genomef = args
inferbed = "infer-components.bed"
if need_update((scaffoldsf, genomef), inferbed):
scaffolds = Fasta(scaffoldsf, lazy=True)
genome = Fasta(genomef)
genome = genome.tostring()
args = [(scaffold_name, scaffold, genome) \
for scaffold_name, scaffold in scaffolds.iteritems_ordered()]
pool = WriteJobs(map_one_scaffold, args, inferbed, cpus=opts.cpus)
pool.run()
sort([inferbed, "-i"])
bed = Bed(inferbed)
inferagpbed = "infer.bed"
fw = open(inferagpbed, "w")
seen = []
for b in bed:
r = (b.seqid, b.start, b.end)
if check_seen(r, seen):
continue
print("\t".join(str(x) for x in \
(b.accn, 0, b.span, b.seqid, b.score, b.strand)), file=fw)
seen.append(r)
fw.close()
frombed([inferagpbed]) | 0.002981 |
def reload(self):
"""Reload this database.
Refresh any configured schema into :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:raises NotFound: if the database does not exist
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
response = api.get_database_ddl(self.name, metadata=metadata)
self._ddl_statements = tuple(response.statements) | 0.003407 |
def fix_title_capitalization(title):
"""Try to capitalize properly a title string."""
if re.search("[A-Z]", title) and re.search("[a-z]", title):
return title
word_list = re.split(' +', title)
final = [word_list[0].capitalize()]
for word in word_list[1:]:
if word.upper() in COMMON_ACRONYMS:
final.append(word.upper())
elif len(word) > 3:
final.append(word.capitalize())
else:
final.append(word.lower())
return " ".join(final) | 0.001931 |
def endpoint_create_and_update_params(*args, **kwargs):
"""
Collection of options consumed by Transfer endpoint create and update
operations -- accepts toggle regarding create vs. update that makes
display_name required vs. optional.
Usage:
>>> @endpoint_create_and_update_params(create=True)
>>> def command_func(display_name, description, info_link, contact_info,
>>> contact_email, organization, department, keywords,
>>> public, location, disable_verify, myproxy_dn,
>>> myproxy_server, oauth_server, force_encryption,
>>> default_directory, subscription_id, network_use,
>>> max_concurrency, preferred_concurrency,
>>> max_parallelism, preferred_parallelism):
>>> ...
"""
def inner_decorator(f, create=False):
update_help_prefix = (not create and "New ") or ""
# display name is required for create, not update
if create:
f = click.argument("display_name")(f)
else:
f = click.option(
"--display-name", help=(update_help_prefix + "Name for the endpoint")
)(f)
# Options available to any endpoint
f = click.option(
"--description", help=(update_help_prefix + "Description for the endpoint")
)(f)
f = click.option(
"--info-link",
help=(update_help_prefix + "Link for Info about the endpoint"),
)(f)
f = click.option(
"--contact-info",
help=(update_help_prefix + "Contact Info for the endpoint"),
)(f)
f = click.option(
"--contact-email",
help=(update_help_prefix + "Contact Email for the endpoint"),
)(f)
f = click.option(
"--organization",
help=(update_help_prefix + "Organization for the endpoint"),
)(f)
f = click.option(
"--department",
help=(update_help_prefix + "Department which operates the endpoint"),
)(f)
f = click.option(
"--keywords",
help=(
update_help_prefix
+ "Comma separated list of keywords to help searches "
"for the endpoint"
),
)(f)
f = click.option("--default-directory", help=("Set the default directory"))(f)
f = click.option(
"--no-default-directory",
is_flag=True,
flag_value=True,
default=None,
help=("Unset any default directory on the endpoint"),
)(f)
f = click.option(
"--force-encryption/--no-force-encryption",
default=None,
help=("(Un)Force the endpoint to encrypt transfers"),
)(f)
f = click.option(
"--disable-verify/--no-disable-verify",
is_flag=True,
help="(Un)Set the endpoint to ignore checksum verification",
)(f)
# GCS only options
f = click.option(
"--public/--private",
"public",
default=None,
help=(
"Set the endpoint to be public or private "
"(Globus Connect Server only)"
),
)(f)
f = click.option(
"--myproxy-dn",
help=("Set the MyProxy Server DN (Globus Connect Server only)"),
)(f)
f = click.option(
"--myproxy-server",
help=("Set the MyProxy Server URI " "(Globus Connect Server only)"),
)(f)
f = click.option(
"--oauth-server",
help=("Set the OAuth Server URI (Globus Connect Server only)"),
)(f)
f = click.option(
"--location",
type=LocationType(),
default=None,
help="Manually set the endpoint's latitude and longitude "
"(Globus Connect Server only)",
)(f)
# Managed Endpoint options
f = click.option(
"--managed",
"managed",
is_flag=True,
flag_value=True,
default=None,
help=(
"Set the endpoint as a managed endpoint. Requires the "
"user to be a subscription manager. If the user has "
"multiple subscription IDs, --subscription-id must be used "
"instead"
),
)(f)
f = click.option(
"--no-managed",
"managed",
is_flag=True,
flag_value=False,
default=None,
help=(
"Unset the endpoint as a managed endpoint. "
"Does not require the user to be a subscription manager. "
"Mutually exclusive with --subscription-id"
),
)(f)
f = click.option(
"--subscription-id",
type=click.UUID,
default=None,
help=(
"Set the endpoint as a managed endpoint with the given "
"subscription ID. Mutually exclusive with "
"--no-managed"
),
)(f)
f = click.option(
"--network-use",
default=None,
type=click.Choice(["normal", "minimal", "aggressive", "custom"]),
help=(
"Set the endpoint's network use level. If using custom, "
"the endpoint's max and preferred concurrency and "
"parallelism must be set "
"(Managed endpoints only) (Globus Connect Server only)"
),
)(f)
f = click.option(
"--max-concurrency",
type=int,
default=None,
help=(
"Set the endpoint's max concurrency; "
"requires --network-use=custom "
"(Managed endpoints only) (Globus Connect Server only)"
),
)(f)
f = click.option(
"--preferred-concurrency",
type=int,
default=None,
help=(
"Set the endpoint's preferred concurrency; "
"requires --network-use=custom "
"(Managed endpoints only) (Globus Connect Server only)"
),
)(f)
f = click.option(
"--max-parallelism",
type=int,
default=None,
help=(
"Set the endpoint's max parallelism; "
"requires --network-use=custom "
"(Managed endpoints only) (Globus Connect Server only)"
),
)(f)
f = click.option(
"--preferred-parallelism",
type=int,
default=None,
help=(
"Set the endpoint's preferred parallelism; "
"requires --network-use=custom "
"(Managed endpoints only) (Globus Connect Server only)"
),
)(f)
return f
return detect_and_decorate(inner_decorator, args, kwargs) | 0.000844 |
def override_config(self, path):
"""
Will take a yml located in home directory titled '.plugin_config.yml'.
It'll then override, using the yml, the plugin's config file
"""
status = (True, None)
config_override = False
try:
# parse the yml file
c_dict = {}
if exists(self.plugin_config_file):
with open(self.plugin_config_file, 'r') as config_file:
c_dict = yaml.safe_load(config_file.read())
# check for environment variable overrides
check_c_dict = c_dict.copy()
for tool in check_c_dict:
for section in check_c_dict[tool]:
for key in check_c_dict[tool][section]:
if key in environ:
c_dict[tool][section][key] = getenv(key)
# assume the name of the plugin is its directory
plugin_name = path.split('/')[-1]
if plugin_name == '':
plugin_name = path.split('/')[-2]
plugin_config_path = path + '/config/' + plugin_name + '.config'
if exists(plugin_config_path):
plugin_template = Template(plugin_config_path)
plugin_options = c_dict[plugin_name]
for section in plugin_options:
for option in plugin_options[section]:
plugin_template.set_option(section, option,
str(plugin_options[section][option]))
plugin_template.write_config()
config_override = True
except Exception as e: # pragma: no cover
status = (False, str(e))
return status, config_override | 0.001681 |
def write_flows_to_gssha_time_series_xys(self,
path_to_output_file,
series_name,
series_id,
river_index=None,
river_id=None,
date_search_start=None,
date_search_end=None,
daily=False,
filter_mode="mean"):
"""
Write out RAPID output to GSSHA WMS time series xys file.
Parameters
----------
path_to_output_file: str
Path to the output xys file.
series_name: str
The name for the series.
series_id: int
The ID to give the series.
river_index: int, optional
This is the index of the river in the file you want the
streamflow for.
river_id: int, optional
This is the river ID that you want the streamflow for.
date_search_start: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the minimum date for
starting.
date_search_end: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the maximum date for
ending.
daily: bool, optional
If True and the file is CF-Compliant, write out daily flows.
filter_mode: str, optional
You can get the daily average "mean" or the maximum "max".
Default is "mean".
Example writing entire time series to file:
.. code:: python
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_{0}.xys'.format(river_id),
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_id=river_id)
Example writing entire time series as daily average to file:
.. code:: python
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# NOTE: Getting the river index is not necessary
# this is just an example of how to use this
river_index = qout_nc.get_river_index(river_id)
# if file is CF compliant, you can write out daily average
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_daily.xys',
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_index=river_index,
daily=True)
Example writing subset of time series as daily maximum to file:
.. code:: python
from datetime import datetime
from RAPIDpy import RAPIDDataset
river_id = 3624735
path_to_rapid_qout = '/path/to/Qout.nc'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# NOTE: Getting the river index is not necessary
# this is just an example of how to use this
river_index = qout_nc.get_river_index(river_id)
# if file is CF compliant, you can filter by date and
# get daily values
qout_nc.write_flows_to_gssha_time_series_xys(
'/timeseries/Qout_daily_date_filter.xys',
series_name="RAPID_TO_GSSHA_{0}".format(river_id),
series_id=34,
river_index=river_index,
date_search_start=datetime(2002, 8, 31),
date_search_end=datetime(2002, 9, 15),
daily=True,
filter_mode="max")
"""
if river_id is not None:
river_index = self.get_river_index(river_id)
elif river_id is None and river_index is None:
raise ValueError(" Need reach id or reach index ...")
self.raise_time_valid()
# analyze and write
qout_df = self.get_qout_index(river_index,
date_search_start=date_search_start,
date_search_end=date_search_end,
daily=daily,
filter_mode=filter_mode,
as_dataframe=True)
with open_csv(path_to_output_file, 'w') as out_ts:
out_ts.write("XYS {0} {1} \"{2}\"\r\n".format(series_id,
len(qout_df.index),
series_name))
for index, pd_row in qout_df.iterrows():
date_str = index.strftime("%m/%d/%Y %I:%M:%S %p")
out_ts.write("\"{0}\" {1:.5f}\n".format(date_str,
pd_row[0])) | 0.002027 |
def pre_install(portal_setup):
"""Runs berfore the first import step of the *default* profile
This handler is registered as a *pre_handler* in the generic setup profile
:param portal_setup: SetupTool
"""
logger.info("SENAITE LIMS pre-install handler [BEGIN]")
# https://docs.plone.org/develop/addons/components/genericsetup.html#custom-installer-code-setuphandlers-py
profile_id = "profile-senaite.lims:default"
context = portal_setup._getImportContext(profile_id)
portal = context.getSite() # noqa
# Only install the core once!
qi = portal.portal_quickinstaller
if not qi.isProductInstalled("bika.lims"):
portal_setup.runAllImportStepsFromProfile("profile-bika.lims:default")
logger.info("SENAITE LIMS pre-install handler [DONE]") | 0.001255 |
def _transformBy(self, matrix, **kwargs):
"""
Subclasses may override this method.
"""
for point in self.points:
point.transformBy(matrix) | 0.010989 |
def run(self):
"""Main entrypoint method.
Returns
-------
new_nodes : `list`
Nodes to add to the doctree.
"""
from lsst.pex.config import ConfigurableField, RegistryField
logger = getLogger(__name__)
try:
task_class_name = self.arguments[0]
except IndexError:
raise SphinxError(
'{} directive requires a Task class '
'name as an argument'.format(self.directive_name))
logger.debug('Using Task class %s', task_class_name)
task_config_class = get_task_config_class(task_class_name)
config_fields = get_task_config_fields(task_config_class)
all_nodes = []
for field_name, field in config_fields.items():
# Skip fields documented via the `lsst-task-config-subtasks`
# directive
if isinstance(field, (ConfigurableField, RegistryField)):
continue
field_id = format_configfield_id(
'.'.join((task_config_class.__module__,
task_config_class.__name__)),
field_name)
try:
format_field_nodes = get_field_formatter(field)
except ValueError:
logger.debug('Skipping unknown config field type, '
'{0!r}'.format(field))
continue
all_nodes.append(
format_field_nodes(field_name, field, field_id, self.state,
self.lineno)
)
# Fallback if no configuration items are present
if len(all_nodes) == 0:
message = 'No configuration fields.'
return [nodes.paragraph(text=message)]
return all_nodes | 0.001105 |
def validate_enum_attribute(self, attribute: str,
candidates: Set[Union[str, int, float]]) -> None:
""" Validates that the attribute value is among the candidates """
self.add_errors(
validate_enum_attribute(self.fully_qualified_name, self._spec, attribute, candidates)) | 0.015152 |
def make_request_log_message(**args):
'''
Creates a string containing all relevant information
about a request made to the Handle System, for
logging purposes.
:handle: The handle that the request is about.
:url: The url the request is sent to.
:headers: The headers sent along with the request.
:verify: Boolean parameter passed to the requests
module (https verification).
:resp: The request's response.
:op: The library operation during which the request
was sent.
:payload: Optional. The payload sent with the request.
:return: A formatted string.
'''
mandatory_args = ['op', 'handle', 'url', 'headers', 'verify', 'resp']
optional_args = ['payload']
util.check_presence_of_mandatory_args(args, mandatory_args)
util.add_missing_optional_args_with_value_none(args, optional_args)
space = '\n '
message = ''
message += '\n'+args['op']+' '+args['handle']
message += space+'URL: '+args['url']
message += space+'HEADERS: '+str(args['headers'])
message += space+'VERIFY: '+str(args['verify'])
if 'payload' in args.keys():
message += space+'PAYLOAD:'+space+str(args['payload'])
message += space+'RESPONSECODE: '+str(args['resp'].status_code)
message += space+'RESPONSE:'+space+str(args['resp'].content)
return message | 0.000729 |
def disable_switchport(self, inter_type, inter):
"""
Change an interface's operation to L3.
Args:
inter_type: The type of interface you want to configure. Ex.
tengigabitethernet, gigabitethernet, fortygigabitethernet.
inter: The ID for the interface you want to configure. Ex. 1/0/1
Returns:
True if command completes successfully or False if not.
Raises:
None
"""
config = ET.Element('config')
interface = ET.SubElement(config, 'interface',
xmlns=("urn:brocade.com:mgmt:"
"brocade-interface"))
int_type = ET.SubElement(interface, inter_type)
name = ET.SubElement(int_type, 'name')
name.text = inter
ET.SubElement(int_type, 'switchport-basic', operation='delete')
try:
self._callback(config)
return True
# TODO add logging and narrow exception window.
except Exception as error:
logging.error(error)
return False | 0.001783 |
def _ParseKey(self, knowledge_base, registry_key, value_name):
"""Parses a Windows Registry key for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
value_name (str): name of the Windows Registry value.
Raises:
PreProcessFail: if the preprocessing fails.
"""
try:
registry_value = registry_key.GetValueByName(value_name)
except IOError as exception:
raise errors.PreProcessFail((
'Unable to retrieve Windows Registry key: {0:s} value: {1:s} '
'with error: {2!s}').format(
registry_key.path, value_name, exception))
if registry_value:
value_object = registry_value.GetDataAsObject()
if value_object:
self._ParseValueData(knowledge_base, value_object) | 0.005631 |
def classes(self):
"""the partial self.class_name will be used to find actual TestCase classes"""
for module in self.modules():
cs = inspect.getmembers(module, inspect.isclass)
class_name = getattr(self, 'class_name', '')
class_regex = ''
if class_name:
if class_name.startswith("*"):
class_name = class_name.strip("*")
class_regex = re.compile(r'.*?{}'.format(class_name), re.I)
else:
class_regex = re.compile(r'^{}'.format(class_name), re.I)
for c_name, c in cs:
can_yield = True
if class_regex and not class_regex.match(c_name):
#if class_name and class_name not in c_name:
can_yield = False
if can_yield and issubclass(c, unittest.TestCase):
if c is not unittest.TestCase: # ignore actual TestCase class
logger.debug('class: {} matches {}'.format(c_name, class_name))
yield c | 0.007266 |
def add_bidirlstm(self, name, W_h, W_x, b, W_h_back, W_x_back, b_back, hidden_size, input_size,
input_names, output_names,
inner_activation = 'SIGMOID',
cell_state_update_activation = 'TANH',
output_activation = 'TANH',
peep = None, peep_back = None,
output_all = False,
forget_bias = False, coupled_input_forget_gate= False, cell_clip_threshold = 50000.0):
"""
Add a Bi-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices for the forward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices for the forward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array]
List of biases for the forward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
W_h_back: [numpy.array]
List of recursion weight matrices for the backward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x_back: [numpy.array]
List of input weight matrices for the backward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b_back: [numpy.array]
List of biases for the backward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
The shapes of the biases (hidden_size).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of channels of the input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input, h_reverse_input, c_reverse_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output, h_reverse_output, c_reverse_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID'.
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
peep: [numpy.array] | None
List of peephole vectors for the forward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
peep_back: [numpy.array] | None
List of peephole vectors for the backward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
output_all: boolean
Whether the LSTM layer should output at every time step. Defaults to False.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias. Defaults to False.
coupled_input_forget_gate : boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
Defaults to False.
cell_clip_threshold : float
The limit on the maximum and minimum values on the cell state.
Defaults to 50000.0.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.biDirectionalLSTM
params = spec_layer_params.params
weight_params = spec_layer_params.weightParams.add()
weight_params_back = spec_layer_params.weightParams.add()
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
params.hasBiasVectors = True
params.sequenceOutput = output_all
params.forgetBias = forget_bias
if peep is not None:
params.hasPeepholeVectors = True
params.coupledInputAndForgetGate = coupled_input_forget_gate
params.cellClipThreshold = cell_clip_threshold
#set activations
activation_f = spec_layer_params.activationsForwardLSTM.add()
activation_g = spec_layer_params.activationsForwardLSTM.add()
activation_h = spec_layer_params.activationsForwardLSTM.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, cell_state_update_activation)
_set_recurrent_activation(activation_h, output_activation)
activation_f_back = spec_layer_params.activationsBackwardLSTM.add()
activation_g_back = spec_layer_params.activationsBackwardLSTM.add()
activation_h_back = spec_layer_params.activationsBackwardLSTM.add()
_set_recurrent_activation(activation_f_back, inner_activation)
_set_recurrent_activation(activation_g_back, cell_state_update_activation)
_set_recurrent_activation(activation_h_back, output_activation)
# Write the forward lstm weights
R_i, R_f, R_o, R_z = W_h
W_i, W_f, W_o, W_z = W_x
weight_params.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b is not None:
b_i, b_f, b_o, b_z = b
weight_params.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep is not None:
p_i, p_f, p_o = peep
weight_params.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten()))
# Write the backward lstm weights
R_i, R_f, R_o, R_z = W_h_back
W_i, W_f, W_o, W_z = W_x_back
weight_params_back.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params_back.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params_back.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params_back.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params_back.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params_back.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params_back.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params_back.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b_back is not None:
b_i, b_f, b_o, b_z = b_back
weight_params_back.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params_back.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params_back.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params_back.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep_back is not None:
p_i, p_f, p_o = peep_back
weight_params_back.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params_back.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params_back.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten())) | 0.008914 |
def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching I{rdtype} and I{covers}, if it
exists at the node specified by I{name}.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
It is not an error if the node does not exist, or if there is no
matching rdataset at the node.
If the node has no rdatasets after the deletion, it will itself
be deleted.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
name = self._validate_name(name)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, (str, unicode)):
covers = dns.rdatatype.from_text(covers)
node = self.get_node(name)
if not node is None:
node.delete_rdataset(self.rdclass, rdtype, covers)
if len(node) == 0:
self.delete_node(name) | 0.002362 |
def p_expr1(p):
"""expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
| PLUSPLUS ident
| MINUSMINUS ident
"""
p[0] = node.expr(op=p[1], args=node.expr_list([p[2]])) | 0.003663 |
def put_multipart(self, local_path, destination_s3_path, part_size=DEFAULT_PART_SIZE, **kwargs):
"""
Put an object stored locally to an S3 path
using S3 multi-part upload (for files > 8Mb).
:param local_path: Path to source local file
:param destination_s3_path: URL for target S3 location
:param part_size: Part size in bytes. Default: 8388608 (8MB)
:param kwargs: Keyword arguments are passed to the boto function `upload_fileobj` as ExtraArgs
"""
self._check_deprecated_argument(**kwargs)
from boto3.s3.transfer import TransferConfig
# default part size for boto3 is 8 MB, changing it to fit part_size
# provided as a parameter
transfer_config = TransferConfig(multipart_chunksize=part_size)
(bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
self.s3.meta.client.upload_fileobj(
Fileobj=open(local_path, 'rb'), Bucket=bucket, Key=key, Config=transfer_config, ExtraArgs=kwargs) | 0.004864 |
def enabled(name, **kwargs):
'''
Return True if the named service is enabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name> <runlevels=single-runlevel>
salt '*' service.enabled <service name> <runlevels=[runlevel1,runlevel2]>
'''
enabled_services = get_enabled()
if name not in enabled_services:
return False
if 'runlevels' not in kwargs:
return True
requested_levels = set(kwargs['runlevels'] if isinstance(kwargs['runlevels'],
list) else [kwargs['runlevels']])
return len(requested_levels - set(enabled_services[name])) == 0 | 0.005666 |
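# Hedged illustration of the runlevel check in enabled() above: a service
# passes only when every requested runlevel is already in the set of runlevels
# it is enabled for. The service name and runlevels are made up for the demo.
enabled_services = {'apache2': ['default', 'boot']}
requested = {'default'}
print(len(requested - set(enabled_services['apache2'])) == 0)   # True
requested = {'default', 'single'}
print(len(requested - set(enabled_services['apache2'])) == 0)   # False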
def get_description(self):
'''
:return: description string (with listening host:port)
'''
return '%(description)s listening on %(host)s:%(port)s' % {
'description': super(WebInterface, self).get_description(),
'host': self._host,
'port': self._port
} | 0.006135 |
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""Generate samples of the encoded frames with possible extra data.
By default this function just encodes the numpy array returned as "frame"
from `self.generate_samples` into a PNG image. Override this function to
get other encodings on disk.
Args:
data_dir: final data directory. Typically only used in this method to copy
over user-supplied vocab files if there are extra fields needing them.
tmp_dir: temporary directory that you can use for downloading and scratch.
dataset_split: problem.DatasetSplit, which data split to generate samples
for (for example, training and evaluation).
Yields:
Sample: dict<str feature_name, feature value> which is in disk encoding.
Raises:
ValueError: if the frame has a different number of channels than required.
"""
writer = None
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for features in self.generate_samples(data_dir, tmp_dir, dataset_split):
unencoded_frame = features.pop("frame")
self.validate_frame(unencoded_frame)
height, width, _ = unencoded_frame.shape
encoded_frame = sess.run(
encoded_image_t, feed_dict={image_t: unencoded_frame})
features["image/encoded"] = [encoded_frame]
features["image/format"] = ["png"]
features["image/height"] = [height]
features["image/width"] = [width]
has_debug_image = "image/debug" in features
if has_debug_image:
unencoded_debug = features.pop("image/debug")
encoded_debug = sess.run(
encoded_image_t, feed_dict={image_t: unencoded_debug})
features["image/encoded_debug"] = [encoded_debug]
if self.debug_dump_frames_path:
# Defer creating debug writer until we know debug_dump_frames_path.
if writer is None:
if not tf.gfile.Exists(self.debug_dump_frames_path):
tf.gfile.MkDir(self.debug_dump_frames_path)
writer = debug_video_writer_factory(self.debug_dump_frames_path)
img = unencoded_debug if has_debug_image else unencoded_frame
encoded_img = encoded_debug if has_debug_image else encoded_frame
writer.write(img, encoded_img)
yield features
if self.debug_dump_frames_path:
writer.finish_to_disk() | 0.008829 |
def rms(x):
""""Root Mean Square"
Arguments:
x (seq of float): A sequence of numerical values
Returns:
The square root of the average of the squares of the values
math.sqrt(sum(x_i**2 for x_i in x) / len(x))
or
return (np.array(x) ** 2).mean() ** 0.5
>>> rms([0, 2, 4, 4])
3.0
"""
try:
return (np.array(x) ** 2).mean() ** 0.5
except (TypeError, ValueError):
x = np.array(dropna(x))
invN = 1.0 / len(x)
return (sum(invN * (x_i ** 2) for x_i in x)) ** .5 | 0.003717 |
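# Minimal pure-Python counterpart of the formula quoted in the rms() docstring,
# shown only to make the math explicit; it assumes the input contains no
# missing values.
import math

def rms_plain(x):
    return math.sqrt(sum(x_i ** 2 for x_i in x) / len(x))

assert rms_plain([0, 2, 4, 4]) == 3.0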
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_tmp_dir
g_script_name = os.path.basename(argv[0])
# Override any defaults with the user's choices.
parse_args(argv)
# Create tmp dir and clean up on exit with a callback.
g_tmp_dir = tempfile.mkdtemp(suffix=".tmp_minicran")
print "Created tmp directory: " + g_tmp_dir
atexit.register(remove_tmp_dir)
# Do the work.
try:
b = MinicranBuilder(g_print_only, g_output_dir, g_tmp_dir, g_platform, g_rversion, g_branch, g_buildnum)
b.build()
except KeyboardInterrupt:
print("")
pass | 0.00304 |
def uploadchannel(chef, update=False, thumbnails=False, download_attempts=3, resume=False, reset=False, step=Status.LAST.name, token="#", prompt=False, publish=False, debug=False, verbose=True, warn=False, quiet=False, compress=False, stage=False, **kwargs):
""" uploadchannel: Upload channel to Kolibri Studio server
Args:
chef (BaseChef or subclass): class that implements the construct_channel method
update (bool): indicates whether to re-download files (optional)
thumbnails (bool): indicates whether to automatically derive thumbnails from content (optional)
download_attempts (int): number of times to retry downloading files (optional)
resume (bool): indicates whether to resume last session automatically (optional)
step (str): step to resume process from (optional)
reset (bool): indicates whether to start session from beginning automatically (optional)
token (str): content server authorization token
prompt (bool): indicates whether to prompt user to open channel when done (optional)
publish (bool): indicates whether to automatically publish channel (optional)
debug (bool): indicates whether to print out debugging statements (optional)
verbose (bool): indicates whether to print out info statements (optional)
warn (bool): indicates whether to print out warnings (optional)
quiet (bool): indicates whether to print out errors (optional)
compress (bool): indicates whether to compress larger files (optional)
stage (bool): indicates whether to stage rather than deploy channel (optional)
kwargs (dict): extra keyword args will be passed to get_channel and construct_channel (optional)
Returns: (str) link to access newly created channel
"""
# Set configuration settings
global __logging_handler
level = logging.NOTSET
if debug:
level = logging.DEBUG
elif warn:
level = logging.WARNING
elif quiet:
level = logging.ERROR
elif verbose:
level = logging.INFO
__logging_handler = logging.StreamHandler()
config.LOGGER.addHandler(__logging_handler)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("cachecontrol.controller").setLevel(logging.WARNING)
logging.getLogger("requests.packages").setLevel(logging.WARNING)
config.LOGGER.setLevel(level)
config.UPDATE = update
config.COMPRESS = compress
config.THUMBNAILS = thumbnails
config.STAGE = stage
config.PUBLISH = publish
# Set max retries for downloading
config.DOWNLOAD_SESSION.mount('http://', requests.adapters.HTTPAdapter(max_retries=int(download_attempts)))
config.DOWNLOAD_SESSION.mount('https://', requests.adapters.HTTPAdapter(max_retries=int(download_attempts)))
# Get domain to upload to
config.init_file_mapping_store()
# Authenticate user and check current Ricecooker version
username, token = authenticate_user(token)
check_version_number()
# Setup Sushibar client based on channel info in `get_channel`
config.LOGGER.info("Running get_channel... ")
channel = chef.get_channel(**kwargs)
nomonitor = kwargs.get('nomonitor', False)
config.SUSHI_BAR_CLIENT = SushiBarClient(channel, username, token, nomonitor=nomonitor)
config.LOGGER.info("\n\n***** Starting channel build process *****\n\n")
# Set up progress tracker
config.PROGRESS_MANAGER = RestoreManager()
if (reset or not config.PROGRESS_MANAGER.check_for_session()) and step.upper() != Status.DONE.name:
config.PROGRESS_MANAGER.init_session()
else:
if resume or prompt_yes_or_no('Previous session detected. Would you like to resume your last session?'):
config.LOGGER.info("Resuming your last session...")
step = Status.LAST.name if step is None else step
config.PROGRESS_MANAGER = config.PROGRESS_MANAGER.load_progress(step.upper())
else:
config.PROGRESS_MANAGER.init_session()
# Construct channel if it hasn't been constructed already
if config.PROGRESS_MANAGER.get_status_val() <= Status.CONSTRUCT_CHANNEL.value:
config.LOGGER.info("Calling construct_channel... ")
channel = chef.construct_channel(**kwargs)
config.PROGRESS_MANAGER.set_channel(channel)
channel = config.PROGRESS_MANAGER.channel
# Set initial tree if it hasn't been set already
if config.PROGRESS_MANAGER.get_status_val() <= Status.CREATE_TREE.value:
config.PROGRESS_MANAGER.set_tree(create_initial_tree(channel))
tree = config.PROGRESS_MANAGER.tree
# Download files if they haven't been downloaded already
if config.PROGRESS_MANAGER.get_status_val() <= Status.DOWNLOAD_FILES.value:
config.LOGGER.info("Downloading files...")
config.PROGRESS_MANAGER.set_files(*process_tree_files(tree))
# Set download manager in case steps were skipped
files_to_diff = config.PROGRESS_MANAGER.files_downloaded
config.FAILED_FILES = config.PROGRESS_MANAGER.files_failed
# Get file diff if it hasn't been generated already
if config.PROGRESS_MANAGER.get_status_val() <= Status.GET_FILE_DIFF.value:
config.LOGGER.info("Getting file diff...")
config.PROGRESS_MANAGER.set_diff(get_file_diff(tree, files_to_diff))
file_diff = config.PROGRESS_MANAGER.file_diff
# Set which files have already been uploaded
tree.uploaded_files = config.PROGRESS_MANAGER.files_uploaded
# Upload files if they haven't been uploaded already
if config.PROGRESS_MANAGER.get_status_val() <= Status.UPLOADING_FILES.value:
config.LOGGER.info("Uploading files...")
config.PROGRESS_MANAGER.set_uploaded(upload_files(tree, file_diff))
# Create channel on Kolibri Studio if it hasn't been created already
if config.PROGRESS_MANAGER.get_status_val() <= Status.UPLOAD_CHANNEL.value:
config.LOGGER.info("Creating channel...")
config.PROGRESS_MANAGER.set_channel_created(*create_tree(tree))
channel_link = config.PROGRESS_MANAGER.channel_link
channel_id = config.PROGRESS_MANAGER.channel_id
# Publish tree if flag is set to True
if config.PUBLISH and config.PROGRESS_MANAGER.get_status_val() <= Status.PUBLISH_CHANNEL.value:
config.LOGGER.info("Publishing channel...")
publish_tree(tree, channel_id)
config.PROGRESS_MANAGER.set_published()
# Open link on web browser (if specified) and return new link
config.LOGGER.info("\n\nDONE: Channel created at {0}\n".format(channel_link))
if prompt and prompt_yes_or_no('Would you like to open your channel now?'):
config.LOGGER.info("Opening channel... ")
webbrowser.open_new_tab(channel_link)
config.PROGRESS_MANAGER.set_done()
return channel_link | 0.003617 |
def next(self, *arg, **kwarg):
"""Load the next orbit into .data.
Note
----
Forms complete orbits across day boundaries. If no data loaded
then the first orbit from the first date of data is returned.
"""
# first, check if data exists
if not self.sat.empty:
# set up orbit metadata
self._calcOrbits()
# if current orbit near the last, must be careful
if self._current == (self.num - 1):
# first, load last orbit data
self._getBasicOrbit(orbit=-1)
# End of orbit may occur on the next day
load_next = True
if self.sat._iter_type == 'date':
delta = self.sat.date - self.sat.data.index[-1] \
+ pds.Timedelta('1 day')
if delta >= self.orbit_period:
# don't need to load the next day because this orbit
# ends more than an orbital period from the next date
load_next = False
if load_next:
# the end of the user's desired orbit occurs tomorrow, need
# to form a complete orbit save this current orbit, load
# the next day, combine data, select the correct orbit
temp_orbit_data = self.sat.data.copy()
try:
# loading next day/file clears orbit breaks info
self.sat.next()
if not self.sat.empty:
# combine this next day's data with previous last
# orbit, grab the first one
self.sat.data = pds.concat(
[temp_orbit_data[:self.sat.data.index[0] -
pds.DateOffset(microseconds=1)],
self.sat.data])
self._getBasicOrbit(orbit=1)
else:
# no data, go back a day and grab the last orbit.
# As complete as orbit can be
self.sat.prev()
self._getBasicOrbit(orbit=-1)
except StopIteration:
pass
del temp_orbit_data
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
elif self._current == (self.num):
# at the last orbit, need to be careful about getting the next
# orbit save this current orbit and load the next day
temp_orbit_data = self.sat.data.copy()
# load next day, which clears orbit breaks info
self.sat.next()
# combine this next day orbit with previous last orbit to
# ensure things are correct
if not self.sat.empty:
pad_next = True
# check if data padding is really needed, only works when
# loading by date
if self.sat._iter_type == 'date':
delta = self.sat.date - temp_orbit_data.index[-1]
if delta >= self.orbit_period:
# the end of the previous orbit is more than an
# orbit away from today we don't have to worry
# about it
pad_next = False
if pad_next:
# orbit went across day break, stick old orbit onto new
# data and grab second orbit (first is old)
self.sat.data = pds.concat(
[temp_orbit_data[:self.sat.data.index[0] -
pds.DateOffset(microseconds=1)],
self.sat.data])
# select second orbit of combined data
self._getBasicOrbit(orbit=2)
else:
# padding from the previous orbit wasn't needed, can
# just grab the first orbit of loaded data
self._getBasicOrbit(orbit=1)
if self.sat._iter_type == 'date':
delta = self.sat.date + pds.DateOffset(days=1) \
- self.sat.data.index[0]
if delta < self.orbit_period:
# this orbit's end occurs on the next day, though
# we grabbed the first orbit, missing data
# means the first available orbit in the data
# is actually the last for the day. Resetting to
# the second to last orbit and then calling
# next() will get the last orbit, accounting
# for tomorrow's data as well.
self._current = self.num - 1
self.next()
else:
# no data for the next day
# continue loading data until there is some
# nextData raises StopIteration when it reaches the end,
# leaving this function
while self.sat.empty:
self.sat.next()
self._getBasicOrbit(orbit=1)
del temp_orbit_data
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
elif self._current == 0:
# no current orbit set, grab the first one
# using load command to specify the first orbit, which
# automatically loads prev day if needed to form complete orbit
self.load(orbit=1)
elif self._current < (self.num - 1):
# since we aren't close to the last orbit, just pull the next
# orbit
self._getBasicOrbit(orbit=self._current + 1)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
else:
raise Exception('You ended up where nobody should ever be. ' +
'Talk to someone about this fundamental ' +
'failure.')
else: # no data
while self.sat.empty:
# keep going until data is found
# next raises stopIteration at end of data set, no more data
# possible
self.sat.next()
# we've found data, grab the next orbit
self.next() | 0.000714 |
def queryResponse(self, queryEngine, query=None, vendorSpecific=None, **kwargs):
"""CNRead.query(session, queryEngine, query) → OctetStream
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.query.
Args:
queryEngine:
query:
vendorSpecific:
**kwargs:
Returns:
"""
return self.GET(
['query', queryEngine, query], headers=vendorSpecific, query=kwargs
) | 0.005952 |
def uav_example():
"""A basic example of how to use the UAV agent."""
env = holodeck.make("UrbanCity")
# This changes the control scheme for the uav
env.set_control_scheme("uav0", ControlSchemes.UAV_ROLL_PITCH_YAW_RATE_ALT)
for i in range(10):
env.reset()
# This command tells the UAV to not roll or pitch, but to constantly yaw left at 10m altitude.
command = np.array([0, 0, 2, 10])
for _ in range(1000):
state, reward, terminal, _ = env.step(command)
# To access specific sensor data:
pixels = state[Sensors.PIXEL_CAMERA]
velocity = state[Sensors.VELOCITY_SENSOR] | 0.002985 |
def parse_timecode(cls, timecode):
"""parses timecode string frames '00:00:00:00' or '00:00:00;00' or
milliseconds '00:00:00:000'
"""
bfr = timecode.replace(';', ':').replace('.', ':').split(':')
hrs = int(bfr[0])
mins = int(bfr[1])
secs = int(bfr[2])
frs = int(bfr[3])
return hrs, mins, secs, frs | 0.00542 |
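# Hedged sketch of the inverse of parse_timecode() above: packing the parsed
# fields back into a total frame count. The frame rate is an assumption made
# only for this example.
def timecode_to_frames(timecode, fps=25):
    hrs, mins, secs, frs = (int(part) for part in
                            timecode.replace(';', ':').replace('.', ':').split(':'))
    return ((hrs * 60 + mins) * 60 + secs) * fps + frs

assert timecode_to_frames('00:01:00:05', fps=25) == 60 * 25 + 5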
def get_context_data(self, *args, **kwargs):
'''
Context:
If GET parameters are given:
- search_text
- form (FilterBillsForm)
- long_description
- description
- get_params
Otherwise, the only context item is an unbound FilterBillsForm.
Templates:
- Are specified in subclasses.
'''
context = super(RelatedBillsList, self).get_context_data(*args,
**kwargs)
metadata = context['metadata']
FilterBillsForm = get_filter_bills_form(metadata)
if self.request.GET:
form = FilterBillsForm(self.request.GET)
search_text = form.data.get('search_text')
context.update(search_text=search_text)
context.update(form=FilterBillsForm(self.request.GET))
# human readable description of search
description = []
if metadata:
description.append(metadata['name'])
else:
description = ['Search All']
long_description = []
chamber = form.data.get('chamber')
session = form.data.get('session')
type = form.data.get('type')
status = form.data.getlist('status')
subjects = form.data.getlist('subjects')
sponsor = form.data.get('sponsor__leg_id')
if chamber:
if metadata:
description.append(metadata['chambers'][chamber]['name']
)
else:
description.extend([chamber.title(), 'Chamber'])
description.append((type or 'Bill') + 's')
if session:
description.append(
'(%s)' %
metadata['session_details'][session]['display_name']
)
if 'signed' in status:
long_description.append('which have been signed into law')
elif 'passed_upper' in status and 'passed_lower' in status:
long_description.append('which have passed both chambers')
elif 'passed_lower' in status:
chamber_name = (metadata['chambers']['lower']['name']
if metadata else 'lower chamber')
long_description.append('which have passed the ' +
chamber_name)
elif 'passed_upper' in status:
chamber_name = (metadata['chambers']['upper']['name']
if metadata else 'upper chamber')
long_description.append('which have passed the ' +
chamber_name)
if sponsor:
leg = db.legislators.find_one({'_all_ids': sponsor},
fields=('full_name', '_id'))
leg = leg['full_name']
long_description.append('sponsored by ' + leg)
if subjects:
long_description.append('related to ' + ', '.join(subjects))
if search_text:
long_description.append(u'containing the term "{0}"'.format(
search_text))
context.update(long_description=long_description)
else:
if metadata:
description = [metadata['name'], 'Bills']
else:
description = ['All Bills']
context.update(form=FilterBillsForm())
context.update(description=' '.join(description))
# Add the correct path to paginated links.
params = list(self.request.GET.lists())
for k, v in params[:]:
if k == 'page':
params.remove((k, v))
get_params = urllib.urlencode(params, doseq=True)
context['get_params'] = get_params
# Add the abbr.
context['abbr'] = self.kwargs['abbr']
return context | 0.000741 |
def p_gate_id_list_1(self, program):
"""
gate_id_list : gate_id_list ',' id
"""
program[0] = program[1]
program[0].add_child(program[3])
self.update_symtab(program[3]) | 0.009174 |
def open(self, url):
"""
Open an XML document at the specified I{URL}.
First, a preparsed document is looked up in the I{object cache}. If not
found, its content is fetched from an external source and parsed using
the SAX parser. The result is cached for the next open().
@param url: A document URL.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
"""
cache = self.__cache()
id = self.mangle(url, "document")
xml = cache.get(id)
if xml is None:
xml = self.__fetch(url)
cache.put(id, xml)
self.plugins.document.parsed(url=url, document=xml.root())
return xml | 0.002714 |
def parse_args(self, args=None, namespace=None):
""" Overrides argparse.ArgumentParser.parse_args
Enables '@'-prefixed files to be expanded before arguments are processed
by ArgumentParser.parse_args as usual
"""
self._expand_prefixed_files(args)
return super(CLICommandParser, self).parse_args(args) | 0.008596 |
def add_child_log(self, log_id, child_id):
"""Adds a child to a log.
arg: log_id (osid.id.Id): the ``Id`` of a log
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``log_id`` is already a parent of
``child_id``
raise: NotFound - ``log_id`` or ``child_id`` not found
raise: NullArgument - ``log_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_child_catalog(catalog_id=log_id, child_id=child_id)
return self._hierarchy_session.add_child(id_=log_id, child_id=child_id) | 0.003109 |
def _getel(key, value):
"""Returns an element given a key and value."""
if key in ['HorizontalRule', 'Null']:
return elt(key, 0)()
elif key in ['Plain', 'Para', 'BlockQuote', 'BulletList',
'DefinitionList']:
return elt(key, 1)(value)
return elt(key, len(value))(*value) | 0.002907 |
def format_uncertainty(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted uncertainty according to the analysis, result
and decimal mark specified following these rules:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the uncertainty will
be formatted in scientific notation. The uncertainty exponential
value used will be the same as the one used for the result. The
uncertainty will be rounded according to the same precision as
the result.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 0.004E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the uncertainty will be
formatted as decimal notation and the uncertainty will be
rounded one position after reaching the last 0 (precision
calculated according to the uncertainty value).
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 0.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above applies, but the
precision used for rounding the uncertainty is not calculated from
the uncertainty neither the result. The fixed length precision is
used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
If the result is not floatable or no uncertainty defined, returns
an empty string.
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result of the analysis. Used to retrieve and/or
calculate the precision and/or uncertainty
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:returns: the formatted uncertainty
"""
try:
result = float(result)
except ValueError:
return ""
objres = None
try:
objres = float(analysis.getResult())
except ValueError:
pass
if result == objres:
# To avoid problems with DLs
uncertainty = analysis.getUncertainty()
else:
uncertainty = analysis.getUncertainty(result)
if uncertainty is None or uncertainty == 0:
return ""
# Scientific notation?
# Get the default precision for scientific notation
threshold = analysis.getExponentialFormatPrecision()
precision = analysis.getPrecision(result)
formatted = _format_decimal_or_sci(uncertainty, precision, threshold,
sciformat)
return formatDecimalMark(formatted, decimalmark) | 0.000282 |
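# Simplified, standalone sketch of the decision rule described in the
# format_uncertainty() docstring (it is not the real _format_decimal_or_sci
# helper): when the integer part of the result has more digits than the
# exponential-format threshold, the uncertainty is expressed against the same
# power of ten as the result; otherwise it is printed as a plain decimal.
def sketch_format_uncertainty(result, uncertainty, precision, threshold):
    int_digits = len(str(int(abs(result))))
    if int_digits > threshold:
        exponent = int_digits - 1
        mantissa = uncertainty / 10.0 ** exponent
        return "%.*fE+%02d" % (precision, mantissa, exponent)
    return "%.*f" % (precision, uncertainty)

# Mirrors example (a) from the docstring: result 32092, uncertainty 37,
# ExponentialFormatPrecision 4.
print(sketch_format_uncertainty(32092, 37, 3, 4))   # 0.004E+04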
def evaluate(ref_time, ref_freq, est_time, est_freq, **kwargs):
"""Evaluate two melody (predominant f0) transcriptions, where the first is
treated as the reference (ground truth) and the second as the estimate to
be evaluated (prediction).
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> scores = mir_eval.melody.evaluate(ref_time, ref_freq,
... est_time, est_freq)
Parameters
----------
ref_time : np.ndarray
Time of each reference frequency value
ref_freq : np.ndarray
Array of reference frequency values
est_time : np.ndarray
Time of each estimated frequency value
est_freq : np.ndarray
Array of estimated frequency values
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Convert to reference/estimated voicing/frequency (cent) arrays
(ref_voicing, ref_cent,
est_voicing, est_cent) = util.filter_kwargs(
to_cent_voicing, ref_time, ref_freq, est_time, est_freq, **kwargs)
# Compute metrics
scores = collections.OrderedDict()
(scores['Voicing Recall'],
scores['Voicing False Alarm']) = util.filter_kwargs(voicing_measures,
ref_voicing,
est_voicing, **kwargs)
scores['Raw Pitch Accuracy'] = util.filter_kwargs(raw_pitch_accuracy,
ref_voicing, ref_cent,
est_voicing, est_cent,
**kwargs)
scores['Raw Chroma Accuracy'] = util.filter_kwargs(raw_chroma_accuracy,
ref_voicing, ref_cent,
est_voicing, est_cent,
**kwargs)
scores['Overall Accuracy'] = util.filter_kwargs(overall_accuracy,
ref_voicing, ref_cent,
est_voicing, est_cent,
**kwargs)
return scores | 0.000385 |
def load_tempo(filename, delimiter=r'\s+'):
r"""Load tempo estimates from an annotation file in MIREX format.
The file should consist of three numeric columns: the first two
correspond to tempo estimates (in beats-per-minute), and the third
denotes the relative confidence of the first value compared to the
second (in the range [0, 1]). The file should contain only one row.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
tempi : np.ndarray, non-negative
The two tempo estimates
weight : float [0, 1]
The relative importance of ``tempi[0]`` compared to ``tempi[1]``
"""
# Use our universal function to load the two tempi and the weight
t1, t2, weight = load_delimited(filename, [float, float, float], delimiter)
weight = weight[0]
tempi = np.concatenate([t1, t2])
if len(t1) != 1:
raise ValueError('Tempo file should contain only one line.')
# Validate them, but throw a warning in place of an error
try:
tempo.validate_tempi(tempi)
except ValueError as error:
warnings.warn(error.args[0])
if not 0 <= weight <= 1:
raise ValueError('Invalid weight: {}'.format(weight))
return tempi, weight | 0.000713 |
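# Hedged sketch of the on-disk format load_tempo() above expects: a single line
# with two tempi in BPM and a relative weight, separated by whitespace. This
# bypasses load_delimited and the validation for brevity.
import numpy as np

line = "120.0  180.0  0.7"
t1, t2, weight = (float(v) for v in line.split())
tempi = np.array([t1, t2])
assert tempi.shape == (2,) and 0 <= weight <= 1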
def get_stops(
feed: "Feed",
date: Optional[str] = None,
trip_id: Optional[str] = None,
route_id: Optional[str] = None,
*,
in_stations: bool = False,
) -> DataFrame:
"""
Return a section of ``feed.stops``.
Parameters
-----------
feed : Feed
date : string
YYYYMMDD string; restricts the output to stops active
(visited by trips) on the date
trip_id : string
ID of a trip in ``feed.trips``; restricts output to stops
visited by the trip
route_id : string
ID of route in ``feed.routes``; restricts output to stops
visited by the route
in_stations : boolean
If ``True``, then restricts output to stops in stations if
station data is available in ``feed.stops``
Returns
-------
DataFrame
A subset of ``feed.stops`` defined by the parameters above
Notes
-----
Assume the following feed attributes are not ``None``:
- ``feed.stops``
- Those used in :func:`.stop_times.get_stop_times`
"""
s = feed.stops.copy()
if date is not None:
A = feed.get_stop_times(date)["stop_id"]
s = s[s["stop_id"].isin(A)].copy()
if trip_id is not None:
st = feed.stop_times.copy()
B = st[st["trip_id"] == trip_id]["stop_id"]
s = s[s["stop_id"].isin(B)].copy()
elif route_id is not None:
A = feed.trips[feed.trips["route_id"] == route_id]["trip_id"]
st = feed.stop_times.copy()
B = st[st["trip_id"].isin(A)]["stop_id"]
s = s[s["stop_id"].isin(B)].copy()
if in_stations and set(["location_type", "parent_station"]) <= set(
s.columns
):
s = s[(s["location_type"] != 1) & (s["parent_station"].notnull())]
return s | 0.000565 |
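# Hedged, minimal illustration of the isin() filtering pattern used by
# get_stops() above, with toy stand-ins for feed.stops and feed.stop_times.
import pandas as pd

stops = pd.DataFrame({"stop_id": ["s1", "s2", "s3"]})
stop_times = pd.DataFrame({"trip_id": ["t1", "t1", "t2"],
                           "stop_id": ["s1", "s2", "s3"]})
visited = stop_times[stop_times["trip_id"] == "t1"]["stop_id"]
print(stops[stops["stop_id"].isin(visited)])   # rows for s1 and s2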
def acquire(self, **kwargs):
"""
Copy the file and return its path
Returns
-------
str or None
The path of the file or None if it does not exist or if
verification failed.
"""
path = path_string(self.path)
if os.path.exists(path):
if config.verify_file(path, self.sha256):
return path
return None | 0.004751 |
def save_hex(hex_file, path):
"""
Given a string representation of a hex file, this function copies it to
the specified path thus causing the device mounted at that point to be
flashed.
If the hex_file is empty it will raise a ValueError.
If the filename at the end of the path does not end in '.hex' it will raise
a ValueError.
"""
if not hex_file:
raise ValueError('Cannot flash an empty .hex file.')
if not path.endswith('.hex'):
raise ValueError('The path to flash must be for a .hex file.')
with open(path, 'wb') as output:
output.write(hex_file.encode('ascii')) | 0.00157 |
def calc_delay(remainingDrops):
''' Calculate the idle delay
Minimum play time for cards to drop is ~20min again. Except for accounts
that requested a refund?
Re-check every 15 mintes if there are more than 1 card drops remaining.
If only one drop remains, check every 5 minutes
'''
global sameDelay, lastDelay
# Reset lastDelay for new appids
if remainingDrops > 1:
lastDelay = 5
sameDelay = 0
if remainingDrops > 2:
return 15 * 60 # Check every 15 minutes
elif remainingDrops == 2:
return 10 * 60 # Check every 10 minutes
else:
# decrease delay by one minute every two calls
if lastDelay > 1:
if sameDelay == 2:
sameDelay = 0
lastDelay -= 1
sameDelay += 1
return lastDelay * 60 | 0.004662 |
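# Hedged driver for calc_delay() above, showing the back-off schedule it
# produces. It assumes the module-level globals are initialised roughly as the
# surrounding module would do; the starting values here are for the demo only.
sameDelay, lastDelay = 0, 5
for remaining in (4, 3, 2, 1, 1, 1, 1):
    print(remaining, calc_delay(remaining) // 60, "min")   # 15, 15, 10, then 5, 5, 4, 4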
def two_phase_dP_dz_acceleration(m, D, x, rhol, rhog, dv_dP_l, dv_dP_g, dx_dP,
dP_dL, dA_dL):
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to acceleration for flow inside channels. This is a continuous
calculation, providing the differential in pressure per unit length and
should be called as part of an integration routine ([1]_, [2]_, [3]_).
.. math::
-\left(\frac{\partial P}{\partial L}\right)_{A} = G^2
\left(\left(\frac{1}{\rho_g} - \frac{1}{\rho_l}\right)\frac{\partial P}
{\partial L}\frac{\partial x}{\partial P} +
\frac{\partial P}{\partial L}\left[x \frac{\partial (1/\rho_g)}
{\partial P} + (1-x) \frac{\partial (1/\rho_l)}{\partial P}
\right] \right) - \frac{G^2}{\rho_{hom}}\frac{1}{A}\frac{\partial A}
{\partial L}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
x : float
Quality of fluid [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
dv_dP_l : float
Derivative of mass specific volume of the liquid phase with respect to
pressure, [m^3/(kg*Pa)]
dv_dP_g : float
Derivative of mass specific volume of the gas phase with respect to
pressure, [m^3/(kg*Pa)]
dx_dP : float
Derivative of mass quality of the two-phase fluid with respect to
pressure (numerical derivatives may be convenient for this), [1/Pa]
dP_dL : float
Pressure drop per unit length of pipe, [Pa/m]
dA_dL : float
Change in area of pipe per unit length of pipe, [m^2/m]
Returns
-------
dP_dz : float
Acceleration component of pressure drop for two-phase flow, [Pa/m]
Notes
-----
This calculation has the `homogeneous` model built in to it as its
derivation is shown in [1]_. The discrete calculation is more flexible as
different void fractions may be used.
Examples
--------
>>> two_phase_dP_dz_acceleration(m=1, D=0.1, x=0.372, rhol=827.1,
... rhog=3.919, dv_dP_l=-5e-12, dv_dP_g=-4e-7, dx_dP=-2e-7, dP_dL=120.0,
... dA_dL=0.0001)
20.137876617489034
References
----------
.. [1] Shoham, Ovadia. Mechanistic Modeling of Gas-Liquid Two-Phase Flow in
Pipes. Pap/Cdr edition. Richardson, TX: Society of Petroleum Engineers,
2006.
.. [2] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [3] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
'''
A = 0.25*pi*D*D
G = m/A
t1 = (1.0/rhog - 1.0/rhol)*dP_dL*dx_dP + dP_dL*(x*dv_dP_g + (1.0 - x)*dv_dP_l)
voidage_h = homogeneous(x, rhol, rhog)
rho_h = rhol*(1.0 - voidage_h) + rhog*voidage_h
return -G*G*(t1 - dA_dL/(rho_h*A)) | 0.004666 |
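# Hedged sketch of the no-slip (homogeneous) void-fraction relation that the
# function above relies on for rho_h; homogeneous() itself lives elsewhere in
# the library, so a standalone version is shown here for illustration.
def homogeneous_sketch(x, rhol, rhog):
    return 1.0 / (1.0 + (1.0 - x) / x * (rhog / rhol))

x, rhol, rhog = 0.372, 827.1, 3.919
alpha = homogeneous_sketch(x, rhol, rhog)
rho_h = rhol * (1.0 - alpha) + rhog * alpha
print(alpha, rho_h)   # void fraction ~0.99, mixture density ~10 kg/m^3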
def pdf(self, x, e=0., w=1., a=0.):
"""
probability density function
see: https://en.wikipedia.org/wiki/Skew_normal_distribution
:param x: input value
:param e:
:param w:
:param a:
:return:
"""
t = (x-e) / w
return 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a*t) | 0.005698 |
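# Hedged numerical sanity check for the skew-normal density above: integrating
# a standalone copy of it over a wide grid should give roughly 1 for any shape
# parameter. Only numpy and scipy.stats are assumed, which the method already uses.
import numpy as np
from scipy import stats

def skew_pdf(x, e=0.0, w=1.0, a=0.0):
    t = (x - e) / w
    return 2.0 / w * stats.norm.pdf(t) * stats.norm.cdf(a * t)

grid = np.linspace(-10.0, 10.0, 2001)
area = np.trapz(skew_pdf(grid, a=4.0), grid)
print(round(area, 3))   # ~1.0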
def create_standalone_context(require=None, **settings) -> 'Context':
'''
Create a standalone ModernGL context.
Example::
# Create a context with highest possible supported version
ctx = moderngl.create_context()
# Require at least OpenGL 4.3
ctx = moderngl.create_context(require=430)
Keyword Arguments:
require (int): OpenGL version code.
Returns:
:py:class:`Context` object
'''
backend = os.environ.get('MODERNGL_BACKEND')
if backend is not None:
settings['backend'] = backend
ctx = Context.__new__(Context)
ctx.mglo, ctx.version_code = mgl.create_standalone_context(settings)
ctx._screen = None
ctx.fbo = None
ctx._info = None
ctx.extra = None
if require is not None and ctx.version_code < require:
raise ValueError('Requested OpenGL version {}, got version {}'.format(
require, ctx.version_code))
return ctx | 0.000999 |
def plot_source_topos(self, common_scale=None):
""" Plot topography of the Source decomposition.
Parameters
----------
common_scale : float, optional
If set to None, each topoplot's color axis is scaled individually. Otherwise specifies the percentile
(1-99) of values across all plots. This value is taken as the maximum color scale.
"""
if self.unmixing_ is None and self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
self._prepare_plots(True, True)
self.plotting.plot_sources(self.topo_, self.mixmaps_, self.unmixmaps_, common_scale) | 0.007452 |
def get_eqsl_users(**kwargs):
"""Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt
"""
url = ""
eqsl = []
try:
url = kwargs['url']
except KeyError:
url = "http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt"
try:
result = requests.get(url)
except (ConnectionError, HTTPError, Timeout) as e:
raise IOError(e)
if result.status_code == requests.codes.ok:
eqsl = re.sub("^List.+UTC", "", result.text)
eqsl = eqsl.upper().split()
else:
raise IOError("HTTP Error: " + str(result.status_code))
return eqsl | 0.002948 |
def get_current_word(self, completion=False):
"""Return current word, i.e. word at cursor position"""
ret = self.get_current_word_and_position(completion)
if ret is not None:
return ret[0] | 0.008772 |
def push(self, path, name, tag=None):
'''push an image to an S3 endpoint'''
path = os.path.abspath(path)
image = os.path.basename(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# Extract the metadata
names = parse_image_name(remove_uri(name), tag=tag)
image_size = os.path.getsize(path) >> 20
# Create extra metadata, this is how we identify the image later
# *important* bug in boto3 will return these capitalized
# see https://github.com/boto/boto3/issues/1709
metadata = {'sizemb': "%s" % image_size,
'client': 'sregistry' }
self.bucket.upload_file(path, names['storage_uri'], {"Metadata": metadata }) | 0.006588 |
def get_next_sort_string(self, field):
"""
If we're already sorted by the field then the sort query
returned reverses the sort order.
"""
# self.sort_field is the current sort field
if field == self.sort_field:
next_sort = self.toggle_sort_order() + field
else:
default_order_for_field = \
self._allowed_sort_fields[field]['default_direction']
next_sort = default_order_for_field + field
return self.get_sort_string(next_sort) | 0.00369 |
def get_assessment_offered_ids_by_bank(self, bank_id):
"""Gets the list of ``AssessmentOffered`` ``Ids`` associated with a ``Bank``.
arg: bank_id (osid.id.Id): ``Id`` of the ``Bank``
return: (osid.id.IdList) - list of related assessment offered
``Ids``
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
id_list = []
for assessment_offered in self.get_assessments_offered_by_bank(bank_id):
id_list.append(assessment_offered.get_id())
return IdList(id_list) | 0.004405 |
def get_values(self, context_type):
"""
Get the values valid on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type valid on this line
"""
if context_type.upper() == "ENV":
return self.envs
elif context_type.upper() == "LABEL":
return self.labels | 0.00565 |
def rotate_image_180():
''' Rotate the image '''
# Create the media service
mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
media_service = mycam.create_media_service()
profiles = media_service.GetProfiles()
# Use the first profile and Profiles have at least one
token = profiles[0]._token
# Get all video source configurations
configurations_list = media_service.GetVideoSourceConfigurations()
# Use the first profile and Profiles have at least one
video_source_configuration = configurations_list[0]
# Enable rotate
video_source_configuration.Extension[0].Rotate[0].Mode[0] = 'OFF'
# Create request type instance
request = media_service.create_type('SetVideoSourceConfiguration')
request.Configuration = video_source_configuration
# ForcePersistence is obsolete and should always be assumed to be True
request.ForcePersistence = True
# Set the video source configuration
media_service.SetVideoSourceConfiguration(request) | 0.000978 |