def is_cached(self):
"""Returns true if this rule is already cached."""
# TODO: cache by target+hash, not per file.
try:
for item in self.rule.output_files:
log.info(item)
self.cachemgr.in_cache(item, self._metahash())
except cache.CacheMiss:
log.info('[%s]: Not cached.', self.address)
return False
else:
log.info('[%s]: found in cache.', self.address)
return True
def write_header(self):
"""Write `header` to `file`.
See Also
--------
write_data
"""
for properties in self.header.values():
value = properties['value']
offset_bytes = int(properties['offset'])
self.file.seek(offset_bytes)
value.tofile(self.file)
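# A minimal standalone sketch of the pattern used by write_header above: seek to each
# field's byte offset and write its raw value. The header layout below is hypothetical,
# and an in-memory buffer plus tobytes() stands in for value.tofile(self.file).
import io
import numpy as np
buf = io.BytesIO()
header = {
    "version": {"offset": 0, "value": np.int32(2)},
    "count": {"offset": 8, "value": np.int64(100)},
}
for properties in header.values():
    buf.seek(int(properties["offset"]))
    buf.write(properties["value"].tobytes())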
def cancelAllPendingResults( self ):
"""Cancel all pending results."""
# grab all the pending job ids
jobs = self.pendingResults()
if len(jobs) > 0:
# abort in the cluster
self._abortJobs(jobs)
# cancel in the notebook
self.notebook().cancelAllPendingResults()
def from_meta(cls, meta, meta_all=None):
"""Copy DocstringMeta from another instance."""
if len(meta.args) == 2:
name = meta.args[1]
meta_type = None
for x in meta_all:
if x.args[1] == name and x.args[0] == 'type':
meta_type = x.description
break
return cls(args=meta.args, description=meta.description, type=meta_type)
else:
return cls(args=meta.args, description=meta.description)
def issue_comments(self, issue_id_or_key, extra_query_params={}):
"""
client = BacklogClient("your_space_name", "your_api_key")
client.issue_comments("YOUR_PROJECT-999")
"""
return self.do("GET", "issues/{issue_id_or_key}/comments",
url_params={"issue_id_or_key": issue_id_or_key},
query_params=extra_query_params
)
def to_sigproc_keyword(keyword, value=None):
""" Generate a serialized string for a sigproc keyword:value pair
If value=None, just the keyword will be written with no payload.
Data type is inferred by keyword name (via a lookup table)
Args:
keyword (str): Keyword to write
value (None, float, str, double or angle): value to write to file
Returns:
value_str (str): serialized string to write to file.
"""
keyword = bytes(keyword)
if value is None:
return np.int32(len(keyword)).tostring() + keyword
else:
dtype = header_keyword_types[keyword]
dtype_to_type = {b'<l' : np.int32,
b'str' : str,
b'<d' : np.float64,
b'angle' : to_sigproc_angle}
value_dtype = dtype_to_type[dtype]
if value_dtype is str:
return np.int32(len(keyword)).tostring() + keyword + np.int32(len(value)).tostring() + value
else:
return np.int32(len(keyword)).tostring() + keyword + value_dtype(value).tostring()
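# A rough standalone illustration of the framing to_sigproc_keyword builds (assuming a
# little-endian machine, where np.int32(n).tostring() is four little-endian bytes): a
# 4-byte length prefix, the keyword, then the optional raw payload. In the function
# above, string values additionally carry their own length prefix.
import struct

def frame_sigproc_keyword(keyword: bytes, payload: bytes = b"") -> bytes:
    return struct.pack("<i", len(keyword)) + keyword + payload

print(frame_sigproc_keyword(b"HEADER_START"))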
def print_report(label, user, system, real):
"""
Prints the report of one step of a benchmark.
"""
print("{:<12s} {:12f} {:12f} ( {:12f} )".format(label,
user,
system,
real))
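# Example call with made-up numbers; it prints one row with a left-aligned label and
# fixed-width columns for the user, system and wall-clock times.
print_report("parse", 0.251342, 0.004210, 0.263001)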
def encode(self, b64=False, always_bytes=True):
"""Encode the packet for transmission."""
if self.binary and not b64:
encoded_packet = six.int2byte(self.packet_type)
else:
encoded_packet = six.text_type(self.packet_type)
if self.binary and b64:
encoded_packet = 'b' + encoded_packet
if self.binary:
if b64:
encoded_packet += base64.b64encode(self.data).decode('utf-8')
else:
encoded_packet += self.data
elif isinstance(self.data, six.string_types):
encoded_packet += self.data
elif isinstance(self.data, dict) or isinstance(self.data, list):
encoded_packet += self.json.dumps(self.data,
separators=(',', ':'))
elif self.data is not None:
encoded_packet += str(self.data)
if always_bytes and not isinstance(encoded_packet, binary_types):
encoded_packet = encoded_packet.encode('utf-8')
return encoded_packet
def start(self, daemon=True):
"""
Start driving the chain asynchronously, return immediately
:param daemon: ungracefully kill the driver when the program terminates
:type daemon: bool
"""
if self._run_lock.acquire(False):
try:
# there is a short race window in which `start` release the lock,
# but `run` has not picked it up yet, but the thread exists anyway
if self._run_thread is None:
self._run_thread = threading.Thread(target=self._run_in_thread)
self._run_thread.daemon = daemon
self._run_thread.start()
finally:
self._run_lock.release()
def softmax_to_unary(sm, GT_PROB=1):
"""Deprecated, use `unary_from_softmax` instead."""
warning("pydensecrf.softmax_to_unary is deprecated, use unary_from_softmax instead.")
scale = None if GT_PROB == 1 else GT_PROB
return unary_from_softmax(sm, scale, clip=None)
def teardown(self):
'''
Clean up the target once all tests are completed
'''
if self.controller:
self.controller.teardown()
for monitor in self.monitors:
monitor.teardown()
def notify_slaves(self):
"""Checks to see if slaves should be notified, and notifies them if needed"""
if self.disable_slave_notify is not None:
LOGGER.debug('Slave notifications disabled')
return False
if self.zone_data()['kind'] == 'Master':
response_code = self._put('/zones/' + self.domain + '/notify').status_code
if response_code == 200:
LOGGER.debug('Slave(s) notified')
return True
LOGGER.debug('Slave notification failed with code %i', response_code)
else:
LOGGER.debug('Zone type should be \'Master\' for slave notifications')
return False
def to_api_data(self):
""" Returns a dict to communicate with the server
:rtype: dict
"""
data = {}
# recurrence pattern
if self.__interval and isinstance(self.__interval, int):
recurrence_pattern = data[self._cc('pattern')] = {}
recurrence_pattern[self._cc('type')] = 'daily'
recurrence_pattern[self._cc('interval')] = self.__interval
if self.__days_of_week and isinstance(self.__days_of_week,
(list, tuple, set)):
recurrence_pattern[self._cc('type')] = 'relativeMonthly'
recurrence_pattern[self._cc('daysOfWeek')] = list(
self.__days_of_week)
if self.__first_day_of_week:
recurrence_pattern[self._cc('type')] = 'weekly'
recurrence_pattern[
self._cc('firstDayOfWeek')] = self.__first_day_of_week
elif self.__month and isinstance(self.__month, int):
recurrence_pattern[self._cc('type')] = 'relativeYearly'
recurrence_pattern[self._cc('month')] = self.__month
if self.__index:
recurrence_pattern[self._cc('index')] = self.__index
else:
if self.__index:
recurrence_pattern[self._cc('index')] = self.__index
elif self.__day_of_month and isinstance(self.__day_of_month, int):
recurrence_pattern[self._cc('type')] = 'absoluteMonthly'
recurrence_pattern[self._cc('dayOfMonth')] = self.__day_of_month
if self.__month and isinstance(self.__month, int):
recurrence_pattern[self._cc('type')] = 'absoluteYearly'
recurrence_pattern[self._cc('month')] = self.__month
# recurrence range
if self.__start_date:
recurrence_range = data[self._cc('range')] = {}
recurrence_range[self._cc('type')] = 'noEnd'
recurrence_range[
self._cc('startDate')] = self.__start_date.isoformat()
recurrence_range[
self._cc('recurrenceTimeZone')] = self.__recurrence_time_zone
if self.__end_date:
recurrence_range[self._cc('type')] = 'endDate'
recurrence_range[
self._cc('endDate')] = self.__end_date.isoformat()
elif self.__occurrences is not None and isinstance(
self.__occurrences,
int):
recurrence_range[self._cc('type')] = 'numbered'
recurrence_range[
self._cc('numberOfOccurrences')] = self.__occurrences
return data
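# Illustrative shape of the dict returned above (assuming self._cc simply camel-cases
# key names): a daily recurrence with interval 2 and only a start date would come out
# roughly as the following.
example_payload = {
    "pattern": {"type": "daily", "interval": 2},
    "range": {"type": "noEnd", "startDate": "2019-01-01",
              "recurrenceTimeZone": "UTC"},
}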
def get_gradebook_hierarchy_session(self, proxy):
"""Gets the session traversing gradebook hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookHierarchySession) - a
``GradebookHierarchySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_hierarchy() is
false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy()`` is true.*
"""
if not self.supports_gradebook_hierarchy():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.GradebookHierarchySession(proxy=proxy, runtime=self._runtime)
def create(self, num):
"""
Creates the environment
in your subclassed create function include the line below
super().build(arg1, arg2, arg3, ...)
"""
self.log.record_process('enviroment.py', 'Creating ' + str(num) + ' environments - ' + self.name)
def get_moments(metricParams, vary_fmax=False, vary_density=None):
"""
This function will calculate the various integrals (moments) that are
needed to compute the metric used in template bank placement and
coincidence.
Parameters
-----------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
vary_fmax : boolean, optional (default False)
If set to False the metric and rotations are calculated once, for the
full range of frequency [f_low,f_upper).
If set to True the metric and rotations are calculated multiple times,
for frequency ranges [f_low,f_low + i*vary_density), where i starts at
1 and runs up until f_low + (i+1)*vary_density > f_upper.
Thus values greater than f_upper are *not* computed.
The calculation for the full range [f_low,f_upper) is also done.
vary_density : float, optional
If vary_fmax is True, this will be used in computing the frequency
ranges as described for vary_fmax.
Returns
--------
None : None
**THIS FUNCTION RETURNS NOTHING**
The following will be **added** to the metricParams structure
metricParams.moments : Moments structure
This contains the result of all the integrals used in computing the
metrics above. It can be used for the ethinca components calculation,
or other similar calculations. This is composed of two compound
dictionaries. The first entry indicates which moment is being
calculated and the second entry indicates the upper frequency cutoff
that was used.
In all cases x = f/f0.
For the first entries the options are:
moments['J%d' %(i)][f_cutoff]
This stores the integral of
x**((-i)/3.) * delta X / PSD(x)
moments['log%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.))) * x**((-i)/3.) * delta X / PSD(x)
moments['loglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**2 * x**((-i)/3.) * delta X / PSD(x)
moments['logloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**3 * x**((-i)/3.) * delta X / PSD(x)
moments['loglogloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**4 * x**((-i)/3.) * delta X / PSD(x)
The second entry stores the frequency cutoff used when computing
the integral. See description of the vary_fmax option above.
All of these values are normalized by a factor of
x**((-7)/3.) * delta X / PSD(x)
The normalization factor can be obtained in
moments['I7'][f_cutoff]
"""
# NOTE: Unless the TaylorR2F4 metric is used the log^3 and log^4 terms are
# not needed. As this calculation is not too slow compared to bank
# placement we just do this anyway.
psd_amp = metricParams.psd.data
psd_f = numpy.arange(len(psd_amp), dtype=float) * metricParams.deltaF
new_f, new_amp = interpolate_psd(psd_f, psd_amp, metricParams.deltaF)
# Need I7 first as this is the normalization factor
funct = lambda x,f0: 1
I7 = calculate_moment(new_f, new_amp, metricParams.fLow, \
metricParams.fUpper, metricParams.f0, funct,\
vary_fmax=vary_fmax, vary_density=vary_density)
# Do all the J moments
moments = {}
moments['I7'] = I7
for i in range(-7,18):
funct = lambda x,f0: x**((-i+7)/3.)
moments['J%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the logx multiplied by some power terms
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.))) * x**((-i+7)/3.)
moments['log%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the loglog term
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**2 * x**((-i+7)/3.)
moments['loglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the logloglog term
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**3 * x**((-i+7)/3.)
moments['logloglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the loglogloglog term
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**4 * x**((-i+7)/3.)
moments['loglogloglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
metricParams.moments = moments
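# A toy numerical sketch of the normalised moments the docstring describes (this is not
# PyCBC's calculate_moment): J_i integrates x**(-i/3) / PSD(x) over x = f/f0 and is
# normalised by the i = 7 case, i.e. the 'I7' entry. The PSD and grid here are made up.
import numpy

def toy_moment(i, x, psd):
    dx = x[1] - x[0]
    integrand = x ** (-i / 3.0) / psd
    norm = x ** (-7.0 / 3.0) / psd
    return (integrand.sum() * dx) / (norm.sum() * dx)

x = numpy.linspace(1.0, 10.0, 1000)   # x = f / f0
psd = numpy.ones_like(x)              # flat toy PSD
print(toy_moment(4, x, psd))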
def find_first_version(self):
"""Finds the first version of igraph that exists in the nightly build
repo from the version numbers provided in ``self.versions_to_try``."""
for version in self.versions_to_try:
remote_url = self.get_download_url(version=version)
if http_url_exists(remote_url):
return version, remote_url
return None, None | Finds the first version of igraph that exists in the nightly build
repo from the version numbers provided in ``self.versions_to_try``. |
def file_is_present(self, file_path):
"""
check if file 'file_path' is present, raises IOError if file_path
is not a file
:param file_path: str, path to the file
:return: True if file exists, False if file does not exist
"""
p = self.p(file_path)
if not os.path.exists(p):
return False
if not os.path.isfile(p):
raise IOError("%s is not a file" % file_path)
return True
def update_members(self, list_id, data):
"""
Batch subscribe or unsubscribe list members.
Only the members array is required in the request body parameters.
Within the members array, each member requires an email_address
and either a status or status_if_new. The update_existing parameter
will also be considered required to help prevent accidental updates
to existing members and will default to false if not present.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"members": array*
[
{
"email_address": string*,
"status": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending'),
"status_if_new": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')
}
],
"update_existing": boolean*
}
"""
self.list_id = list_id
if 'members' not in data:
raise KeyError('The update must have at least one member')
else:
if not len(data['members']) <= 500:
raise ValueError('You may only batch sub/unsub 500 members at a time')
for member in data['members']:
if 'email_address' not in member:
raise KeyError('Each list member must have an email_address')
check_email(member['email_address'])
if 'status' not in member and 'status_if_new' not in member:
raise KeyError('Each list member must have either a status or a status_if_new')
valid_statuses = ['subscribed', 'unsubscribed', 'cleaned', 'pending']
if 'status' in member and member['status'] not in valid_statuses:
raise ValueError('The list member status must be one of "subscribed", "unsubscribed", "cleaned", or '
'"pending"')
if 'status_if_new' in member and member['status_if_new'] not in valid_statuses:
raise ValueError('The list member status_if_new must be one of "subscribed", "unsubscribed", '
'"cleaned", or "pending"')
if 'update_existing' not in data:
data['update_existing'] = False
return self._mc_client._post(url=self._build_path(list_id), data=data)
def show(self):
"""
Print information to stdout about the current session.
Gets all APKs, all DEX files and all Analysis objects.
"""
print("APKs in Session: {}".format(len(self.analyzed_apk)))
for d, a in self.analyzed_apk.items():
print("\t{}: {}".format(d, a))
print("DEXs in Session: {}".format(len(self.analyzed_dex)))
for d, dex in self.analyzed_dex.items():
print("\t{}: {}".format(d, dex))
print("Analysis in Session: {}".format(len(self.analyzed_vms)))
for d, a in self.analyzed_vms.items():
print("\t{}: {}".format(d, a)) | Print information to stdout about the current session.
Gets all APKs, all DEX files and all Analysis objects. |
def predict_fixation_duration(
durations, angles, length_diffs, dataset=None, params=None):
"""
Fits a non-linear piecewise regression to fixation durations for a fixmat.
Returns corrected fixation durations.
"""
if dataset is None:
dataset = np.ones(durations.shape)
corrected_durations = np.nan * np.ones(durations.shape)
for i, ds in enumerate(np.unique(dataset)):
e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y)
v0 = [120, 220.0, -.1, 0.5, .1, .1]
id_ds = dataset == ds
idnan = (
~np.isnan(angles)) & (
~np.isnan(durations)) & (
~np.isnan(length_diffs))
v, s = leastsq(
e, v0, args=(
angles[
idnan & id_ds], durations[
idnan & id_ds], length_diffs[
idnan & id_ds]), maxfev=10000)
corrected_durations[id_ds] = (durations[id_ds] -
(leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v)))
if params is not None:
params['v' + str(i)] = v
params['s' + str(i)] = s
return corrected_durations
def diff_tree(candidate_config=None,
candidate_path=None,
running_config=None,
running_path=None,
saltenv='base'):
'''
Return the diff, as Python dictionary, between the candidate and the running
configuration.
candidate_config
The candidate configuration sent as text. This argument is ignored when
``candidate_path`` is set.
candidate_path
Absolute or remote path from where to load the candidate configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
running_config
The running configuration sent as text. This argument is ignored when
``running_path`` is set.
running_path
Absolute or remote path from where to load the running configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``candidate_path`` or ``running_path`` is not a
``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' iosconfig.diff_tree candidate_path=salt://path/to/candidate.cfg running_path=salt://path/to/running.cfg
'''
candidate_tree = tree(config=candidate_config,
path=candidate_path,
saltenv=saltenv)
running_tree = tree(config=running_config,
path=running_path,
saltenv=saltenv)
return salt.utils.dictdiffer.deep_diff(running_tree, candidate_tree)
def ensure_unique(qs, field_name, value, exclude_id=None):
"""
Makes sure that `value` is unique for model.fieldname, and non-empty.
"""
orig = value
if not value:
value = "None"
for x in itertools.count(1):
if not qs.exclude(id=exclude_id).filter(**{field_name: value}).exists():
break
if orig:
value = '%s-%d' % (orig, x)
else:
value = '%d' % x
return value
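# A standalone sketch of the same suffixing idea without the Django queryset: keep
# appending -1, -2, ... until the candidate is not already taken. The `taken` set is a
# stand-in for the qs.exclude(...).filter(...).exists() check above.
import itertools

def ensure_unique_in(taken, value):
    orig = value or "None"
    value = orig
    for x in itertools.count(1):
        if value not in taken:
            return value
        value = "%s-%d" % (orig, x)

print(ensure_unique_in({"draft", "draft-1"}, "draft"))   # draft-2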
def check(self, obj, condition) -> "WriteTransaction":
"""
Add a condition which must be met for the transaction to commit.
While the condition is checked against the provided object, that object will not be modified. It is only
used to provide the hash and range key to apply the condition to.
At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will
be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object
multiple times.
:param obj: The object to use for the transaction condition. This object will not be modified.
:param condition: A condition on an object which must hold for the transaction to commit.
:return: this transaction for chaining
"""
self._extend([TxItem.new("check", obj, condition)])
return self
def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):
"""
Create a copy of static resource files as we can't serve them from within the pex file.
:param static_module_name: Module name containing module to cache in a tempdir
:type static_module_name: string, for example 'twitter.common.zookeeper' or similar
:param static_path: Module name, for example 'serverset'
:param dir_location: create a new temporary directory inside, or None to have one created
:returns temp_dir: Temporary directory with the zipped assets inside
:rtype: str
"""
# asset_path is initially a module name that's the same as the static_path, but will be
# changed to walk the directory tree
def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):
for asset in resource_listdir(static_module_name, asset_path):
asset_target = os.path.normpath(
os.path.join(os.path.relpath(asset_path, static_path), asset))
if resource_isdir(static_module_name, os.path.join(asset_path, asset)):
safe_mkdir(os.path.join(temp_dir, asset_target))
walk_zipped_assets(static_module_name, static_path, os.path.join(asset_path, asset),
temp_dir)
else:
with open(os.path.join(temp_dir, asset_target), 'wb') as fp:
path = os.path.join(static_path, asset_target)
file_data = resource_string(static_module_name, path)
fp.write(file_data)
if dir_location is None:
temp_dir = safe_mkdtemp()
else:
temp_dir = dir_location
walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)
return temp_dir
def create_object(self, filename, img_properties=None):
"""Create an image object on local disk from the given file. The file
is copied to a new local directory that is created for the image object.
The optional list of image properties will be associated with the new
object together with the set of default properties for images.
Parameters
----------
filename : string
Path to file on disk
img_properties : Dictionary, optional
Set of image properties.
Returns
-------
ImageHandle
Handle for created image object
"""
# Get the file name, i.e., last component of the given absolute path
prop_name = os.path.basename(os.path.normpath(filename))
# Ensure that the image file has a valid suffix. Currently we do not
# check whether the file actually is an image. If the suffix is valid
# get the associated Mime type from the dictionary.
prop_mime = None
pos = prop_name.rfind('.')
if pos >= 0:
suffix = prop_name[pos:].lower()
if suffix in VALID_IMGFILE_SUFFIXES:
prop_mime = VALID_IMGFILE_SUFFIXES[suffix]
if not prop_mime:
raise ValueError('unsupported image type: ' + prop_name)
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-','')
# The sub-folder to store the image is given by the first two
# characters of the identifier.
image_dir = self.get_directory(identifier)
# Create the directory if it doesn't exists
if not os.access(image_dir, os.F_OK):
os.makedirs(image_dir)
# Create the initial set of properties for the new image object.
properties = {
datastore.PROPERTY_NAME: prop_name,
datastore.PROPERTY_FILENAME : prop_name,
datastore.PROPERTY_FILESIZE : os.path.getsize(filename),
datastore.PROPERTY_MIMETYPE : prop_mime
}
# Add additional image properties (if given). Note that this will not
# override the default image properties.
if not img_properties is None:
for prop in img_properties:
if not prop in properties:
properties[prop] = img_properties[prop]
# Copy original file to new object's directory
shutil.copyfile(filename, os.path.join(image_dir, prop_name))
# Create object handle and store it in database before returning it
obj = ImageHandle(identifier, properties, image_dir)
self.insert_object(obj)
return obj
def column(self):
"""获取文章所在专栏.
:return: 文章所在专栏
:rtype: Column
"""
from .column import Column
if 'column' in self.soup:
url = Column_Url + '/' + self.soup['column']['slug']
name = self.soup['column']['name']
return Column(url, name, session=self._session)
else:
return None
def pretokenized_tfrecord_dataset(filenames,
text2self,
eos_included,
repeat,
batch_size,
sequence_length):
"""Reads tensor2tensor-style data files.
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer
sequence_length: an integer
Returns:
A tf.data.Dataset of batches
"""
dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024)
if repeat:
dataset = dataset.repeat()
keys = ["targets"] if text2self else ["inputs", "targets"]
def decode_example(serialized_example):
"""Return a dict of Tensors from a serialized tensorflow.Example."""
data_fields = {}
data_items_to_decoders = {}
for k in keys:
data_fields[k] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k)
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(sorted(data_items_to_decoders))
decoded = decoder.decode(serialized_example, items=decode_items)
if not eos_included:
decoded = [tf.concat([v, [1]], 0) for v in decoded]
return dict(zip(decode_items, decoded))
dataset = dataset.map(decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return pack_and_batch(dataset, batch_size, sequence_length)
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k])
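# A quick NumPy analogue of the reshape performed above, for intuition: everything
# after the batch dimension is collapsed into a single axis of size k.
import numpy as np
x = np.zeros((32, 8, 8, 3))
flat = x.reshape(x.shape[0], -1)
print(flat.shape)   # (32, 192)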
def _set_police_priority_map(self, v, load=False):
"""
Setter method for police_priority_map, mapped from YANG variable /police_priority_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_police_priority_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_police_priority_map() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",police_priority_map.police_priority_map, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}), is_container='list', yang_name="police-priority-map", rest_name="police-priority-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """police_priority_map must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",police_priority_map.police_priority_map, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}), is_container='list', yang_name="police-priority-map", rest_name="police-priority-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='list', is_config=True)""",
})
self.__police_priority_map = t
if hasattr(self, '_set'):
self._set()
def set(self, time_sec, callback_fn, *args, **kwdargs):
"""Convenience function to create and set a timer.
Equivalent to:
timer = timer_factory.timer()
timer.set_callback('expired', callback_fn, *args, **kwdargs)
timer.set(time_sec)
"""
timer = self.timer()
timer.set_callback('expired', callback_fn, *args, **kwdargs)
timer.set(time_sec)
return timer
def default_from_address(self):
"""
Cache the coinbase address so that we don't make two requests for every
single transaction.
"""
if self._coinbase_cache_til is not None:
if time.time() - self._coinbase_cache_til > 30:
self._coinbase_cache_til = None
self._coinbase_cache = None
if self._coinbase_cache is None:
self._coinbase_cache = self.get_coinbase()
return self._coinbase_cache
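# The caching idea above in isolation (a standalone sketch, not part of the class):
# keep a value around and drop it once it is older than a time-to-live, here 30
# seconds to match the window used above.
import time

class CachedValue(object):
    def __init__(self, fetch, ttl=30):
        self._fetch, self._ttl = fetch, ttl
        self._value, self._stamp = None, None

    def get(self):
        if self._stamp is not None and time.time() - self._stamp > self._ttl:
            self._value, self._stamp = None, None
        if self._value is None:
            self._value, self._stamp = self._fetch(), time.time()
        return self._value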
def reset( self ):
"""
Resets the values to the current application information.
"""
self.setValue('colorSet', XPaletteColorSet())
self.setValue('font', QApplication.font())
self.setValue('fontSize', QApplication.font().pointSize())
def setText(self, sequence):
"""Qt method extension."""
self.setToolTip(sequence)
super(ShortcutLineEdit, self).setText(sequence)
def set_starting_ratio(self, ratio):
""" Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
def connect(self):
"""Overrides HTTPSConnection.connect to specify TLS version"""
# Standard implementation from HTTPSConnection, which is not
# designed for extension, unfortunately
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if getattr(self, '_tunnel_host', None):
self.sock = sock # pragma: no cover
self._tunnel() # pragma: no cover
# This is the only difference; default wrap_socket uses SSLv23
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1_2)
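# On Python versions where ssl.wrap_socket has been removed, roughly the same
# "pin the protocol version" behaviour can be sketched with an SSLContext. The names
# below are placeholders, and certificate verification is left at the context defaults.
import ssl

def wrap_tls12(plain_sock, key_file=None, cert_file=None, server_hostname=None):
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    if cert_file:
        context.load_cert_chain(certfile=cert_file, keyfile=key_file)
    return context.wrap_socket(plain_sock, server_hostname=server_hostname)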
def get_by_ip(cls, ip):
'Returns Host instance for the given ip address.'
ret = cls.hosts_by_ip.get(ip)
if ret is None:
ret = cls.hosts_by_ip[ip] = [Host(ip)]
return ret
def multiply(self, a, b):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
if a is None or b is None: return None
m, n = len(a), len(a[0])
if len(b) != n:
raise Exception("A's column number must be equal to B's row number.")
l = len(b[0])
table_a, table_b = {}, {}
for i, row in enumerate(a):
for j, ele in enumerate(row):
if ele:
if i not in table_a: table_a[i] = {}
table_a[i][j] = ele
for i, row in enumerate(b):
for j, ele in enumerate(row):
if ele:
if i not in table_b: table_b[i] = {}
table_b[i][j] = ele
c = [[0 for j in range(l)] for i in range(m)]
for i in table_a:
for k in table_a[i]:
if k not in table_b: continue
for j in table_b[k]:
c[i][j] += table_a[i][k] * table_b[k][j]
return c
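# A standalone version of the same sparse-product idea, handy for sanity-checking the
# method above on a small case (plain function, no class):
def sparse_multiply(a, b):
    m, l = len(a), len(b[0])
    c = [[0] * l for _ in range(m)]
    for i, row in enumerate(a):
        for k, ele in enumerate(row):
            if ele:
                for j, bkj in enumerate(b[k]):
                    if bkj:
                        c[i][j] += ele * bkj
    return c

print(sparse_multiply([[1, 0, 0], [-1, 0, 3]],
                      [[7, 0, 0], [0, 0, 0], [0, 0, 1]]))   # [[7, 0, 0], [-7, 0, 3]]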
def download(self, url, path):
"""Download url and save data to path."""
# original_url = url
# print(url)
qurl = QUrl(url)
url = to_text_string(qurl.toEncoded(), encoding='utf-8')
logger.debug(str((url, path)))
if url in self._workers:
while not self._workers[url].finished:
return self._workers[url]
worker = DownloadWorker(url, path)
# Check download folder exists
folder = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(folder):
os.makedirs(folder)
request = QNetworkRequest(qurl)
self._head_requests[url] = request
self._paths[url] = path
self._workers[url] = worker
self._manager.head(request)
self._timer.start()
return worker
def is_unit(q):
'''
is_unit(q) yields True if q is a pint unit or a string that names a pint unit and False
otherwise.
'''
if isinstance(q, six.string_types):
try: return hasattr(units, q)
except: return False
else:
cls = type(q)
return cls.__module__.startswith('pint.') and cls.__name__ == 'Unit'
def _expand_host(self, host):
"""
Used internally to add the default port to hosts not including
portnames.
"""
if isinstance(host, basestring):
return (host, self.default_port)
return tuple(host)
def write(filename, mesh, write_binary=True):
"""Writes msh files, cf.
<http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.
"""
if mesh.points.shape[1] == 2:
logging.warning(
"msh2 requires 3D points, but 2D points given. "
"Appending 0 third component."
)
mesh.points = numpy.column_stack(
[mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])]
)
if write_binary:
for key, value in mesh.cells.items():
if value.dtype != c_int:
logging.warning(
"Binary Gmsh needs 32-bit integers (got %s). Converting.",
value.dtype,
)
mesh.cells[key] = numpy.array(value, dtype=c_int)
# Gmsh cells are mostly ordered like VTK, with a few exceptions:
cells = mesh.cells.copy()
if "tetra10" in cells:
cells["tetra10"] = cells["tetra10"][:, [0, 1, 2, 3, 4, 5, 6, 7, 9, 8]]
if "hexahedron20" in cells:
cells["hexahedron20"] = cells["hexahedron20"][
:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 9, 16, 18, 19, 17, 10, 12, 14, 15]
]
with open(filename, "wb") as fh:
mode_idx = 1 if write_binary else 0
size_of_double = 8
fh.write(
("$MeshFormat\n2.2 {} {}\n".format(mode_idx, size_of_double)).encode(
"utf-8"
)
)
if write_binary:
fh.write(struct.pack("i", 1))
fh.write("\n".encode("utf-8"))
fh.write("$EndMeshFormat\n".encode("utf-8"))
if mesh.field_data:
_write_physical_names(fh, mesh.field_data)
# Split the cell data: gmsh:physical and gmsh:geometrical are tags, the
# rest is actual cell data.
tag_data = {}
other_data = {}
for cell_type, a in mesh.cell_data.items():
tag_data[cell_type] = {}
other_data[cell_type] = {}
for key, data in a.items():
if key in ["gmsh:physical", "gmsh:geometrical"]:
tag_data[cell_type][key] = data.astype(c_int)
else:
other_data[cell_type][key] = data
_write_nodes(fh, mesh.points, write_binary)
_write_elements(fh, cells, tag_data, write_binary)
if mesh.gmsh_periodic is not None:
_write_periodic(fh, mesh.gmsh_periodic)
for name, dat in mesh.point_data.items():
_write_data(fh, "NodeData", name, dat, write_binary)
cell_data_raw = raw_from_cell_data(other_data)
for name, dat in cell_data_raw.items():
_write_data(fh, "ElementData", name, dat, write_binary)
return
def _set_pw_profile(self, v, load=False):
"""
Setter method for pw_profile, mapped from YANG variable /pw_profile (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pw_profile is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pw_profile() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pw_profile must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True)""",
})
self.__pw_profile = t
if hasattr(self, '_set'):
self._set()
def when_connected(self):
"""
Retrieve the currently-connected Protocol, or the next one to connect.
Returns:
defer.Deferred: A Deferred that fires with a connected
:class:`FedoraMessagingProtocolV2` instance. This is similar to
the whenConnected method from the Twisted endpoints APIs, which
sadly isn't available before Twisted 16.1.0, a version that isn't
available in EL7.
"""
if self._client and not self._client.is_closed:
return defer.succeed(self._client)
else:
return self._client_deferred
def view(self, buffer_time = 10.0, sample_size = 10000, name=None, description=None, start=False):
"""
Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service.
"""
if name is None:
name = ''.join(random.choice('0123456789abcdef') for x in range(16))
if self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
if self._json_stream:
view_stream = self._json_stream
else:
self._json_stream = self.as_json(force_object=False)._layout(hidden=True)
view_stream = self._json_stream
# colocate map operator with stream that is being viewed.
if self._placeable:
self._colocate(view_stream, 'view')
else:
view_stream = self
port = view_stream.oport.name
view_config = {
'name': name,
'port': port,
'description': description,
'bufferTime': buffer_time,
'sampleSize': sample_size}
if start:
view_config['activateOption'] = 'automatic'
view_stream.oport.operator.addViewConfig(view_config)
_view = View(name)
self.topology.graph._views.append(_view)
return _view
def chempot_vs_gamma_plot_one(self, plt, entry, ref_delu, chempot_range,
delu_dict={}, delu_default=0, label='', JPERM2=False):
"""
Helper function to help plot the surface energy of a
single SlabEntry as a function of chemical potential.
Args:
plt (Plot): A plot.
entry (SlabEntry): Entry of the slab whose surface energy we want
to plot
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
label (str): Label of the slab for the legend.
JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
eV/A^2 (False)
Returns:
(Plot): Plot of surface energy vs chemical potential for one entry.
"""
chempot_range = sorted(chempot_range)
# use dashed lines for slabs that are not stoichiometric
# wrt bulk. Label with formula if nonstoichiometric
ucell_comp = self.ucell_entry.composition.reduced_composition
if entry.adsorbates:
s = entry.cleaned_up_slab
clean_comp = s.composition.reduced_composition
else:
clean_comp = entry.composition.reduced_composition
mark = '--' if ucell_comp != clean_comp else '-'
delu_dict = self.set_all_variables(delu_dict, delu_default)
delu_dict[ref_delu] = chempot_range[0]
gamma_min = self.as_coeffs_dict[entry]
gamma_min = gamma_min if type(gamma_min).__name__ == \
"float" else sub_chempots(gamma_min, delu_dict)
delu_dict[ref_delu] = chempot_range[1]
gamma_max = self.as_coeffs_dict[entry]
gamma_max = gamma_max if type(gamma_max).__name__ == \
"float" else sub_chempots(gamma_max, delu_dict)
gamma_range = [gamma_min, gamma_max]
se_range = np.array(gamma_range) * EV_PER_ANG2_TO_JOULES_PER_M2 \
if JPERM2 else gamma_range
mark = entry.mark if entry.mark else mark
c = entry.color if entry.color else self.color_dict[entry]
plt.plot(chempot_range, se_range, mark, color=c, label=label)
return plt
def add_device_notification(self, data_name, attr, callback, user_handle=None):
# type: (str, NotificationAttrib, Callable, int) -> Optional[Tuple[int, int]]
"""Add a device notification.
:param str data_name: PLC storage address
:param pyads.structs.NotificationAttrib attr: object that contains
all the attributes for the definition of a notification
:param callback: callback function that gets executed on in the event
of a notification
:rtype: (int, int)
:returns: notification handle, user handle
Save the notification handle and the user handle on creating a
notification if you want to be able to remove the notification
later in your code.
**Usage**:
>>> import pyads
>>> from ctypes import size_of
>>>
>>> # Connect to the local TwinCAT PLC
>>> plc = pyads.Connection('127.0.0.1.1.1', 851)
>>>
>>> # Create callback function that prints the value
>>> def mycallback(adr, notification, user):
>>> contents = notification.contents
>>> value = next(
>>> map(int,
>>> bytearray(contents.data)[0:contents.cbSampleSize])
>>> )
>>> print(value)
>>>
>>> with plc:
>>> # Add notification with default settings
>>> attr = pyads.NotificationAttrib(size_of(pyads.PLCTYPE_INT))
>>>
>>> hnotification, huser = plc.add_device_notification(
>>> adr, attr, mycallback)
>>>
>>> # Remove notification
>>> plc.del_device_notification(hnotification, huser)
"""
if self._port is not None:
notification_handle, user_handle = adsSyncAddDeviceNotificationReqEx(
self._port, self._adr, data_name, attr, callback, user_handle
)
return notification_handle, user_handle
return None | Add a device notification.
:param str data_name: PLC storage address
:param pyads.structs.NotificationAttrib attr: object that contains
all the attributes for the definition of a notification
:param callback: callback function that gets executed on in the event
of a notification
:rtype: (int, int)
:returns: notification handle, user handle
Save the notification handle and the user handle on creating a
notification if you want to be able to remove the notification
later in your code.
**Usage**:
>>> import pyads
>>> from ctypes import sizeof
>>>
>>> # Connect to the local TwinCAT PLC
>>> plc = pyads.Connection('127.0.0.1.1.1', 851)
>>>
>>> # Create callback function that prints the value
>>> def mycallback(adr, notification, user):
>>> contents = notification.contents
>>> value = next(
>>> map(int,
>>> bytearray(contents.data)[0:contents.cbSampleSize])
>>> )
>>> print(value)
>>>
>>> with plc:
>>> # Add notification with default settings
>>>     attr = pyads.NotificationAttrib(sizeof(pyads.PLCTYPE_INT))
>>>
>>> hnotification, huser = plc.add_device_notification(
>>> adr, attr, mycallback)
>>>
>>> # Remove notification
>>> plc.del_device_notification(hnotification, huser) |
def all(self, fields=None, include_fields=True, page=None, per_page=None, extra_params=None):
"""Retrieves a list of all the applications.
Important: The client_secret and encryption_key attributes can only be
retrieved with the read:client_keys scope.
Args:
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise.
page (int): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
extra_params (dictionary, optional): The extra parameters to add to
the request. The fields, include_fields, page and per_page values
specified as parameters take precedence over the ones defined here.
See: https://auth0.com/docs/api/management/v2#!/Clients/get_clients
"""
params = extra_params or {}
params['fields'] = fields and ','.join(fields) or None
params['include_fields'] = str(include_fields).lower()
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(), params=params) | Retrieves a list of all the applications.
Important: The client_secret and encryption_key attributes can only be
retrieved with the read:client_keys scope.
Args:
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise.
page (int): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
extra_params (dictionary, optional): The extra parameters to add to
the request. The fields, include_fields, page and per_page values
specified as parameters take precedence over the ones defined here.
See: https://auth0.com/docs/api/management/v2#!/Clients/get_clients |
def connect(host=None,
port=rethinkdb.DEFAULT_PORT,
timeout=20,
verify=True,
**kwargs):
"""
RethinkDB semantic connection wrapper
raises <brain.connection.BrainNotReady> if connection verification fails
:param verify: <bool> (default True) whether to run POST
:param timeout: <int> max time (s) to wait for connection
:param kwargs: <dict> passthrough rethinkdb arguments
:return:
"""
if not host:
host = DEFAULT_HOSTS.get(check_stage_env())
connection = None
tries = 0
time_quit = time() + timeout
while not connection and time() <= time_quit:
tries += 1
connection = _attempt_connect(host, port, timeout/3, verify, **kwargs)
if not connection:
sleep(0.5)
if not connection:
raise BrainNotReady(
"Tried ({}:{}) {} times at {} second max timeout".format(host,
port,
tries,
timeout))
return connection | RethinkDB semantic connection wrapper
raises <brain.connection.BrainNotReady> if connection verification fails
:param verify: <bool> (default True) whether to run POST
:param timeout: <int> max time (s) to wait for connection
:param kwargs: <dict> passthrough rethinkdb arguments
:return: |
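A minimal usage sketch for connect() above. The host value and a reachable RethinkDB server are assumptions; the import path follows the <brain.connection.BrainNotReady> reference in the docstring.
from brain import connection  # assumed package layout, per the BrainNotReady reference

try:
    conn = connection.connect(host='localhost', timeout=10, verify=True)
except connection.BrainNotReady as exc:
    print('RethinkDB not ready:', exc)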
def p_function_declaration(self, p):
"""
function_declaration \
: FUNCTION identifier LPAREN RPAREN LBRACE function_body RBRACE
| FUNCTION identifier LPAREN formal_parameter_list RPAREN LBRACE \
function_body RBRACE
"""
if len(p) == 8:
p[0] = ast.FuncDecl(
identifier=p[2], parameters=None, elements=p[6])
else:
p[0] = ast.FuncDecl(
identifier=p[2], parameters=p[4], elements=p[7]) | function_declaration \
: FUNCTION identifier LPAREN RPAREN LBRACE function_body RBRACE
| FUNCTION identifier LPAREN formal_parameter_list RPAREN LBRACE \
function_body RBRACE |
def filter_cat(self, axis, cat_index, cat_name):
'''
        Filter the rows or columns of the matrix based on their category. cat_index is the index of the category; the first category has index=1.
'''
        run_filter.filter_cat(self, axis, cat_index, cat_name) | Filter the rows or columns of the matrix based on their category. cat_index is the index of the category; the first category has index=1.
def shewhart(self, data: ['SASdata', str] = None,
boxchart: str = None,
cchart: str = None,
irchart: str = None,
mchart: str = None,
mrchart: str = None,
npchart: str = None,
pchart: str = None,
rchart: str = None,
schart: str = None,
uchart: str = None,
xrchart: str = None,
xschart: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the SHEWHART procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
        :param boxchart: The boxchart variable can only be a string type.
        :param cchart: The cchart variable can only be a string type.
        :param irchart: The irchart variable can only be a string type.
        :param mchart: The mchart variable can only be a string type.
        :param mrchart: The mrchart variable can only be a string type.
        :param npchart: The npchart variable can only be a string type.
        :param pchart: The pchart variable can only be a string type.
        :param rchart: The rchart variable can only be a string type.
        :param schart: The schart variable can only be a string type.
        :param uchart: The uchart variable can only be a string type.
        :param xrchart: The xrchart variable can only be a string type.
        :param xschart: The xschart variable can only be a string type.
        :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
        :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | Python method to call the SHEWHART procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:param boxchart: The boxchart variable can only be a string type.
:param cchart: The cchart variable can only be a string type.
:param irchart: The irchart variable can only be a string type.
:param mchart: The mchart variable can only be a string type.
:param mrchart: The mrchart variable can only be a string type.
:param npchart: The npchart variable can only be a string type.
:param pchart: The pchart variable can only be a string type.
:param rchart: The rchart variable can only be a string type.
:param schart: The schart variable can only be a string type.
:param uchart: The uchart variable can only be a string type.
:param xrchart: The xrchart variable can only be a string type.
:param xschart: The xschart variable can only be a string type.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object |
def save_config(self, cmd="save config", confirm=False, confirm_response=""):
"""Save Config"""
return super(ExtremeVspSSH, self).save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
) | Save Config |
def generate_n_vectors(N_max, dx=1, dy=1, dz=1, half_lattice=True):
r"""
Generate integer vectors, :math:`\boldsymbol{n}`, with
:math:`|\boldsymbol{n}| < N_{\rm max}`.
If ``half_lattice=True``, only return half of the three-dimensional
lattice. If the set N = {(i,j,k)} defines the lattice, we restrict to
the cases such that ``(k > 0)``, ``(k = 0, j > 0)``, and
``(k = 0, j = 0, i > 0)``.
.. todo::
Return shape should be (3,N) to be consistent.
Parameters
----------
N_max : int
Maximum norm of the integer vector.
dx : int
Step size in x direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
dy : int
Step size in y direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
dz : int
Step size in z direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
half_lattice : bool (optional)
Only return half of the 3D lattice.
Returns
-------
vecs : :class:`numpy.ndarray`
A 2D array of integers with :math:`|\boldsymbol{n}| < N_{\rm max}`
with shape (N,3).
"""
vecs = np.meshgrid(np.arange(-N_max, N_max+1, dx),
np.arange(-N_max, N_max+1, dy),
np.arange(-N_max, N_max+1, dz))
    vecs = np.vstack(list(map(np.ravel, vecs))).T
vecs = vecs[np.linalg.norm(vecs, axis=1) <= N_max]
if half_lattice:
ix = ((vecs[:, 2] > 0) |
((vecs[:, 2] == 0) &
(vecs[:, 1] > 0)) |
((vecs[:, 2] == 0) &
(vecs[:, 1] == 0) &
(vecs[:, 0] > 0)))
vecs = vecs[ix]
vecs = np.array(sorted(vecs, key=lambda x: (x[0], x[1], x[2])))
return vecs | r"""
Generate integer vectors, :math:`\boldsymbol{n}`, with
:math:`|\boldsymbol{n}| < N_{\rm max}`.
If ``half_lattice=True``, only return half of the three-dimensional
lattice. If the set N = {(i,j,k)} defines the lattice, we restrict to
the cases such that ``(k > 0)``, ``(k = 0, j > 0)``, and
``(k = 0, j = 0, i > 0)``.
.. todo::
Return shape should be (3,N) to be consistent.
Parameters
----------
N_max : int
Maximum norm of the integer vector.
dx : int
Step size in x direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
dy : int
Step size in y direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
dz : int
Step size in z direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
half_lattice : bool (optional)
Only return half of the 3D lattice.
Returns
-------
vecs : :class:`numpy.ndarray`
A 2D array of integers with :math:`|\boldsymbol{n}| < N_{\rm max}`
with shape (N,3). |
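A short usage sketch for generate_n_vectors(); the checks follow directly from the definition above and assume the function and NumPy are importable.
import numpy as np

vecs = generate_n_vectors(N_max=2)               # half-lattice by default
print(vecs.shape)                                # (N, 3)
assert np.all(np.linalg.norm(vecs, axis=1) <= 2)
assert not np.any(np.all(vecs == 0, axis=1))     # origin excluded by the half-lattice cut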
def set_voltage(self, value, channel=1):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
cmd = "V%d %f" % (channel, value)
self.write(cmd) | channel: 1=OP1, 2=OP2, AUX is not supported |
def ToRequest(self):
"""Converts to gitkit api request parameter dict.
Returns:
Dict, containing non-empty user attributes.
"""
param = {}
if self.email:
param['email'] = self.email
if self.user_id:
param['localId'] = self.user_id
if self.name:
param['displayName'] = self.name
if self.photo_url:
param['photoUrl'] = self.photo_url
if self.email_verified is not None:
param['emailVerified'] = self.email_verified
if self.password_hash:
param['passwordHash'] = base64.urlsafe_b64encode(self.password_hash)
if self.salt:
param['salt'] = base64.urlsafe_b64encode(self.salt)
if self.provider_info:
param['providerUserInfo'] = self.provider_info
return param | Converts to gitkit api request parameter dict.
Returns:
Dict, containing non-empty user attributes. |
def partial_derivative(self, X, y=0):
"""Compute partial derivative :math:`C(u|v)` of cumulative density.
Args:
X: `np.ndarray`
y: `float`
Returns:
"""
self.check_fit()
U, V = self.split_matrix(X)
if self.theta == 1:
return V
else:
t1 = np.power(-np.log(U), self.theta)
t2 = np.power(-np.log(V), self.theta)
p1 = self.cumulative_distribution(X)
p2 = np.power(t1 + t2, -1 + 1.0 / self.theta)
p3 = np.power(-np.log(V), self.theta - 1)
return np.divide(np.multiply(np.multiply(p1, p2), p3), V) - y | Compute partial derivative :math:`C(u|v)` of cumulative density.
Args:
X: `np.ndarray`
y: `float`
Returns: |
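For reference (an added note, not part of the original docstring): for the Gumbel copula with parameter theta, the else branch above evaluates
C(u \mid v) = \frac{\partial C(u,v)}{\partial v}
            = C(u,v)\,\bigl[(-\ln u)^{\theta} + (-\ln v)^{\theta}\bigr]^{\frac{1}{\theta}-1}\,\frac{(-\ln v)^{\theta-1}}{v},
\qquad C(u,v) = \exp\!\Bigl(-\bigl[(-\ln u)^{\theta} + (-\ln v)^{\theta}\bigr]^{1/\theta}\Bigr),
and the method returns this quantity minus y; the theta = 1 case is short-circuited before this branch.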
def wmean_and_var_str_array(W, x):
"""Weighted mean and variance of each component of a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: (N,) structured array
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances}
"""
m = np.empty(shape=x.shape[1:], dtype=x.dtype)
v = np.empty_like(m)
for p in x.dtype.names:
m[p], v[p] = wmean_and_var(W, x[p]).values()
return {'mean': m, 'var': v} | Weighted mean and variance of each component of a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: (N,) structured array
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances} |
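A small usage sketch (assumes this function and its sibling wmean_and_var are importable from the same module):
import numpy as np

N = 100
x = np.zeros(N, dtype=[('a', float), ('b', float)])   # structured data
x['a'] = np.random.randn(N)
x['b'] = 2.0 + np.random.randn(N)
W = np.full(N, 1.0 / N)                               # uniform normalised weights
out = wmean_and_var_str_array(W, x)
print(out['mean']['b'], out['var']['b'])              # roughly 2.0 and 1.0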
def is_quota_exceeded(self) -> bool:
'''Return whether the quota is exceeded.'''
if self.quota and self._url_table is not None:
return self.size >= self.quota and \
self._url_table.get_root_url_todo_count() == 0 | Return whether the quota is exceeded. |
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs):
"""int, int, int->None
Download ONE PART of the course."""
html = json_api_content
title = _wanmen_get_title_by_json_topic_part(html,
tIndex,
pIndex)
bokeccID = _wanmen_get_boke_id_by_json_topic_part(html,
tIndex,
pIndex)
bokecc_download_by_id(vid = bokeccID, title = title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) | int, int, int->None
Download ONE PART of the course. |
def filler(self):
"""Returns the pipeline ID that filled this slot's value.
Returns:
A string that is the pipeline ID.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
"""
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._filler_pipeline_key.name() | Returns the pipeline ID that filled this slot's value.
Returns:
A string that is the pipeline ID.
Raises:
SlotNotFilledError if the value hasn't been filled yet. |
def _get_span(self, m):
"""
Gets a tuple that identifies a span for the specific mention class
that m belongs to.
"""
return (m.sentence.id, m.char_start, m.char_end) | Gets a tuple that identifies a span for the specific mention class
that m belongs to. |
async def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
"""
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
(steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
await self._send_sysex(PrivateConstants.STEPPER_DATA, data) | Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value. |
def _mod_spec(self):
"""
Modified length specifiers: mapping between length modifiers and conversion specifiers. This generates all the
        possibilities, e.g. hhd, etc.
"""
mod_spec={}
for mod, sizes in self.int_len_mod.items():
for conv in self.int_sign['signed']:
mod_spec[mod + conv] = sizes[0]
for conv in self.int_sign['unsigned']:
mod_spec[mod + conv] = sizes[1]
return mod_spec | Modified length specifiers: mapping between length modifiers and conversion specifiers. This generates all the
possibilities, e.g. hhd, etc.
def _check_max_running(self, func, data, opts, now):
'''
Return the schedule data structure
'''
# Check to see if there are other jobs with this
# signature running. If there are more than maxrunning
# jobs present then don't start another.
# If jid_include is False for this job we can ignore all this
# NOTE--jid_include defaults to True, thus if it is missing from the data
# dict we treat it like it was there and is True
# Check if we're able to run
if not data['run']:
return data
if 'jid_include' not in data or data['jid_include']:
jobcount = 0
if self.opts['__role'] == 'master':
current_jobs = salt.utils.master.get_running_jobs(self.opts)
else:
current_jobs = salt.utils.minion.running(self.opts)
for job in current_jobs:
if 'schedule' in job:
log.debug(
'schedule.handle_func: Checking job against fun '
'%s: %s', func, job
)
if data['name'] == job['schedule'] \
and salt.utils.process.os_is_running(job['pid']):
jobcount += 1
log.debug(
'schedule.handle_func: Incrementing jobcount, '
'now %s, maxrunning is %s',
jobcount, data['maxrunning']
)
if jobcount >= data['maxrunning']:
log.debug(
'schedule.handle_func: The scheduled job '
'%s was not started, %s already running',
data['name'], data['maxrunning']
)
data['_skip_reason'] = 'maxrunning'
data['_skipped'] = True
data['_skipped_time'] = now
data['run'] = False
return data
return data | Return the schedule data structure |
def make_app(global_conf, full_stack=True, **app_conf):
"""
Set depotexample up with the settings found in the PasteDeploy configuration
file used.
:param global_conf: The global settings for depotexample (those
defined under the ``[DEFAULT]`` section).
:type global_conf: dict
:param full_stack: Should the whole TG2 stack be set up?
:type full_stack: str or bool
:return: The depotexample application with all the relevant middleware
loaded.
This is the PasteDeploy factory for the depotexample application.
``app_conf`` contains all the application-specific settings (those defined
    under ``[app:main]``).
"""
app = make_base_app(global_conf, full_stack=True, **app_conf)
# Wrap your base TurboGears 2 application with custom middleware here
from depot.manager import DepotManager
app = DepotManager.make_middleware(app)
return app | Set depotexample up with the settings found in the PasteDeploy configuration
file used.
:param global_conf: The global settings for depotexample (those
defined under the ``[DEFAULT]`` section).
:type global_conf: dict
:param full_stack: Should the whole TG2 stack be set up?
:type full_stack: str or bool
:return: The depotexample application with all the relevant middleware
loaded.
This is the PasteDeploy factory for the depotexample application.
``app_conf`` contains all the application-specific settings (those defined
under ``[app:main]``).
def get_trans(self, out_vec=None):
"""Return the translation portion of the matrix as a vector.
If out_vec is provided, store in out_vec instead of creating a new Vec3.
"""
if out_vec:
return out_vec.set(*self.data[3][:3])
return Vec3(*self.data[3][:3]) | Return the translation portion of the matrix as a vector.
If out_vec is provided, store in out_vec instead of creating a new Vec3. |
def set_hot_pluggable_for_device(self, name, controller_port, device, hot_pluggable):
"""Sets a flag in the device information which indicates that the attached
device is hot pluggable or not. This may or may not be supported by a
particular controller and/or drive, and is silently ignored in the
latter case. Changing the setting while the VM is running is forbidden.
The device must already exist; see :py:func:`IMachine.attach_device`
for how to attach a new device.
The @a controllerPort and @a device parameters specify the device slot and
        have the same meaning as with :py:func:`IMachine.attach_device`.
in name of type str
Name of the storage controller.
in controller_port of type int
Storage controller port.
in device of type int
Device slot in the given port.
in hot_pluggable of type bool
New value for the hot-pluggable device flag.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to modify an unregistered virtual machine.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorNotSupported`
Controller doesn't support hot plugging.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
if not isinstance(controller_port, baseinteger):
raise TypeError("controller_port can only be an instance of type baseinteger")
if not isinstance(device, baseinteger):
raise TypeError("device can only be an instance of type baseinteger")
if not isinstance(hot_pluggable, bool):
raise TypeError("hot_pluggable can only be an instance of type bool")
self._call("setHotPluggableForDevice",
in_p=[name, controller_port, device, hot_pluggable]) | Sets a flag in the device information which indicates that the attached
device is hot pluggable or not. This may or may not be supported by a
particular controller and/or drive, and is silently ignored in the
latter case. Changing the setting while the VM is running is forbidden.
The device must already exist; see :py:func:`IMachine.attach_device`
for how to attach a new device.
The @a controllerPort and @a device parameters specify the device slot and
have the same meaning as with :py:func:`IMachine.attach_device`.
in name of type str
Name of the storage controller.
in controller_port of type int
Storage controller port.
in device of type int
Device slot in the given port.
in hot_pluggable of type bool
New value for the hot-pluggable device flag.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to modify an unregistered virtual machine.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorNotSupported`
Controller doesn't support hot plugging. |
def format_args(self, args):
'''Format a D-Bus argument tuple into an appropriate logging string.'''
def format_arg(a):
if isinstance(a, dbus.Boolean):
return str(bool(a))
if isinstance(a, dbus.Byte):
return str(int(a))
if isinstance(a, int) or isinstance(a, long):
return str(a)
if isinstance(a, str):
return '"' + str(a) + '"'
if isinstance(a, unicode): # Python 2 only
return '"' + repr(a.encode('UTF-8'))[1:-1] + '"'
if isinstance(a, list):
return '[' + ', '.join([format_arg(x) for x in a]) + ']'
if isinstance(a, dict):
fmta = '{'
first = True
for k, v in a.items():
if first:
first = False
else:
fmta += ', '
fmta += format_arg(k) + ': ' + format_arg(v)
return fmta + '}'
# fallback
return repr(a)
s = ''
for a in args:
if s:
s += ' '
s += format_arg(a)
if s:
s = ' ' + s
return s | Format a D-Bus argument tuple into an appropriate logging string. |
def create_empty_dataset(self, dataset_id="", project_id="",
dataset_reference=None):
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
            an empty dataset. Don't need to provide it if projectId is in dataset_reference.
:type project_id: str
        :param dataset_id: The id of the dataset. Don't need to provide it
            if datasetId is in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
"""
if dataset_reference:
_validate_value('dataset_reference', dataset_reference, dict)
else:
dataset_reference = {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
if not dataset_reference["datasetReference"].get("datasetId") and not dataset_id:
raise ValueError(
"{} not provided datasetId. Impossible to create dataset")
dataset_required_params = [(dataset_id, "datasetId", ""),
(project_id, "projectId", self.project_id)]
for param_tuple in dataset_required_params:
param, param_name, param_default = param_tuple
if param_name not in dataset_reference['datasetReference']:
if param_default and not param:
self.log.info(
"%s was not specified. Will be used default value %s.",
param_name, param_default
)
param = param_default
dataset_reference['datasetReference'].update(
{param_name: param})
elif param:
_api_resource_configs_duplication_check(
param_name, param,
dataset_reference['datasetReference'], 'dataset_reference')
dataset_id = dataset_reference.get("datasetReference").get("datasetId")
dataset_project_id = dataset_reference.get("datasetReference").get(
"projectId")
self.log.info('Creating Dataset: %s in project: %s ', dataset_id,
dataset_project_id)
try:
self.service.datasets().insert(
projectId=dataset_project_id,
body=dataset_reference).execute(num_retries=self.num_retries)
self.log.info('Dataset created successfully: In project %s '
'Dataset %s', dataset_project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
) | Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
    an empty dataset. Don't need to provide it if projectId is in dataset_reference.
:type project_id: str
:param dataset_id: The id of the dataset. Don't need to provide it
    if datasetId is in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict |
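A hedged usage sketch for create_empty_dataset(). The hook/cursor access pattern and the connection id are assumptions based on Airflow 1.10-era conventions, not taken from the snippet above.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default')   # assumed connection id
cursor = hook.get_conn().cursor()                          # object exposing create_empty_dataset in this API style
cursor.create_empty_dataset(dataset_id='my_dataset', project_id='my-project')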
def add_time_step(self, **create_time_step_kwargs):
"""Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step.
"""
ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
assert isinstance(ts, time_step.TimeStep)
self._time_steps.append(ts) | Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step. |
def absent(name,
**kwargs):
'''
Ensure the job is absent from the Jenkins configured jobs
name
The name of the Jenkins job to remove
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
if __salt__['jenkins.job_exists'](name):
try:
__salt__['jenkins.delete_job'](name)
except CommandExecutionError as exc:
return _fail(ret, exc.strerror)
else:
ret['comment'] = 'Job \'{0}\' deleted.'.format(name)
else:
ret['comment'] = 'Job \'{0}\' already absent.'.format(name)
return ret | Ensure the job is absent from the Jenkins configured jobs
name
The name of the Jenkins job to remove |
def status(name, init_system, verbose):
"""WIP! Try at your own expense
"""
try:
status = Serv(init_system, verbose=verbose).status(name)
except ServError as ex:
sys.exit(ex)
click.echo(json.dumps(status, indent=4, sort_keys=True)) | WIP! Try at your own expense |
def from_schema(cls, schema, handlers={}, **kwargs):
"""
Construct a resolver from a JSON schema object.
"""
return cls(
schema.get('$id', schema.get('id', '')) if isinstance(schema, dict) else '',
schema,
handlers=handlers,
**kwargs
) | Construct a resolver from a JSON schema object. |
def get_argument_parser():
"""Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx.
"""
file_mv = cli.file_mv
desc = 'Extracts gene-level expression data from StringTie output.'
parser = cli.get_argument_parser(desc)
parser.add_argument(
'-s', '--stringtie-file', type=str, required=True, metavar=file_mv,
help="""Path of the StringTie output file ."""
)
parser.add_argument(
'-g', '--gene-file', type=str, required=True, metavar=file_mv,
help="""File containing a list of protein-coding genes."""
)
parser.add_argument(
'--no-novel-transcripts', action='store_true',
help="""Ignore novel transcripts."""
)
# parser.add_argument(
# '--ambiguous-transcripts', default = 'ignore',
# help='Strategy for counting expression of ambiguous novel '
# 'transcripts.'
# )
# possible strategies for ambiguous transcripts: 'ignore','highest','all'
parser.add_argument(
'-o', '--output-file', type=str, required=True, metavar=file_mv,
help="""Path of output file."""
)
cli.add_reporting_args(parser)
return parser | Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx. |
def _compress(self, data, operation):
"""
This private method compresses some data in a given mode. This is used
because almost all of the code uses the exact same setup. It wouldn't
have to, but it doesn't hurt at all.
"""
# The 'algorithm' for working out how big to make this buffer is from
# the Brotli source code, brotlimodule.cc.
original_output_size = int(
math.ceil(len(data) + (len(data) >> 2) + 10240)
)
available_out = ffi.new("size_t *")
available_out[0] = original_output_size
output_buffer = ffi.new("uint8_t []", available_out[0])
ptr_to_output_buffer = ffi.new("uint8_t **", output_buffer)
input_size = ffi.new("size_t *", len(data))
input_buffer = ffi.new("uint8_t []", data)
ptr_to_input_buffer = ffi.new("uint8_t **", input_buffer)
rc = lib.BrotliEncoderCompressStream(
self._encoder,
operation,
input_size,
ptr_to_input_buffer,
available_out,
ptr_to_output_buffer,
ffi.NULL
)
if rc != lib.BROTLI_TRUE: # pragma: no cover
raise Error("Error encountered compressing data.")
assert not input_size[0]
size_of_output = original_output_size - available_out[0]
return ffi.buffer(output_buffer, size_of_output)[:] | This private method compresses some data in a given mode. This is used
because almost all of the code uses the exact same setup. It wouldn't
have to, but it doesn't hurt at all. |
def plot_surface(x, y, z, color=default_color, wrapx=False, wrapy=False):
"""Draws a 2d surface in 3d, defined by the 2d ordered arrays x,y,z.
:param x: {x2d}
:param y: {y2d}
:param z: {z2d}
:param color: {color2d}
    :param bool wrapx: when True, the x direction is assumed to wrap, and polygons are drawn between the end and begin points
    :param bool wrapy: similar for the y coordinate
:return: :any:`Mesh`
"""
return plot_mesh(x, y, z, color=color, wrapx=wrapx, wrapy=wrapy, wireframe=False) | Draws a 2d surface in 3d, defined by the 2d ordered arrays x,y,z.
:param x: {x2d}
:param y: {y2d}
:param z: {z2d}
:param color: {color2d}
:param bool wrapx: when True, the x direction is assumed to wrap, and polygons are drawn between the end and begin points
:param bool wrapy: similar for the y coordinate
:return: :any:`Mesh` |
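A minimal usage sketch (assumes this is ipyvolume's pylab module, as the plot_mesh call and doc templates suggest; run in a Jupyter notebook):
import numpy as np
import ipyvolume.pylab as p3   # assumed import path

X, Y = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = np.sin(np.sqrt(X**2 + Y**2))
p3.figure()
mesh = p3.plot_surface(X, Y, Z, color='orange')
p3.show()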
def from_url(cls, reactor, url, key, alg=RS256, jws_client=None):
"""
Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`]
"""
action = LOG_ACME_CONSUME_DIRECTORY(
url=url, key_type=key.typ, alg=alg.name)
with action.context():
check_directory_url_type(url)
jws_client = _default_client(jws_client, reactor, key, alg)
return (
DeferredContext(jws_client.get(url.asText()))
.addCallback(json_content)
.addCallback(messages.Directory.from_json)
.addCallback(
tap(lambda d: action.add_success_fields(directory=d)))
.addCallback(cls, reactor, key, jws_client)
.addActionFinish()) | Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`] |
def open(self, inf, psw):
"""Return stream object for file data."""
if inf.file_redir:
# cannot leave to unrar as it expects copied file to exist
if inf.file_redir[0] in (RAR5_XREDIR_FILE_COPY, RAR5_XREDIR_HARD_LINK):
inf = self.getinfo(inf.file_redir[2])
if not inf:
raise BadRarFile('cannot find copied file')
if inf.flags & RAR_FILE_SPLIT_BEFORE:
raise NeedFirstVolume("Partial file, please start from first volume: " + inf.filename)
# is temp write usable?
use_hack = 1
if not self._main:
use_hack = 0
elif self._main._must_disable_hack():
use_hack = 0
elif inf._must_disable_hack():
use_hack = 0
elif is_filelike(self._rarfile):
pass
elif inf.file_size > HACK_SIZE_LIMIT:
use_hack = 0
elif not USE_EXTRACT_HACK:
use_hack = 0
# now extract
if inf.compress_type == RAR_M0 and (inf.flags & RAR_FILE_PASSWORD) == 0 and inf.file_redir is None:
return self._open_clear(inf)
elif use_hack:
return self._open_hack(inf, psw)
elif is_filelike(self._rarfile):
return self._open_unrar_membuf(self._rarfile, inf, psw)
else:
return self._open_unrar(self._rarfile, inf, psw) | Return stream object for file data. |
def get_node_attribute(self, node, attribute_name):
"""Given a node and the name of an attribute, get a copy
of that node's attribute.
:param node: reference to the node to retrieve the attribute of.
:param attribute_name: name of the attribute to retrieve.
:returns: attribute value of the attribute_name key for the
specified node.
:raises: ValueError -- No such node exists.
:raises: ValueError -- No such attribute exists.
"""
if not self.has_node(node):
raise ValueError("No such node exists.")
elif attribute_name not in self._node_attributes[node]:
raise ValueError("No such attribute exists.")
else:
return copy.\
copy(self._node_attributes[node][attribute_name]) | Given a node and the name of an attribute, get a copy
of that node's attribute.
:param node: reference to the node to retrieve the attribute of.
:param attribute_name: name of the attribute to retrieve.
:returns: attribute value of the attribute_name key for the
specified node.
:raises: ValueError -- No such node exists.
:raises: ValueError -- No such attribute exists. |
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects] | Returns the list of projects owned by user. |
def reportResourceUsage(imageObjectList, outwcs, num_cores,
interactive=False):
""" Provide some information to the user on the estimated resource
usage (primarily memory) for this run.
"""
from . import imageObject
if outwcs is None:
output_mem = 0
else:
if isinstance(outwcs,imageObject.WCSObject):
owcs = outwcs.final_wcs
else:
owcs = outwcs
output_mem = np.prod(owcs.pixel_shape) * 4 * 3 # bytes used for output arrays
img1 = imageObjectList[0]
numchips = 0
input_mem = 0
for img in imageObjectList:
numchips += img._nmembers # account for group parameter set by user
# if we have the cpus and s/w, ok, but still allow user to set pool size
pool_size = util.get_pool_size(num_cores, None)
pool_size = pool_size if (numchips >= pool_size) else numchips
inimg = 0
chip_mem = 0
for img in imageObjectList:
for chip in range(1,img._numchips+1):
cmem = img[chip].shape[0]*img[chip].shape[1]*4
inimg += 1
if inimg < pool_size:
input_mem += cmem*2
if chip_mem == 0:
chip_mem = cmem
max_mem = (input_mem + output_mem*pool_size + chip_mem*2)//(1024*1024)
print('*'*80)
print('*')
print('* Estimated memory usage: up to %d Mb.'%(max_mem))
print('* Output image size: {:d} X {:d} pixels. '.format(*owcs.pixel_shape))
print('* Output image file: ~ %d Mb. '%(output_mem//(1024*1024)))
print('* Cores available: %d'%(pool_size))
print('*')
print('*'*80)
if interactive:
print('Continue with processing?')
while True:
if sys.version_info[0] >= 3:
k = input("(y)es or (n)o").strip()[0].lower()
else:
k = raw_input("(y)es or (n)o").strip()[0].lower()
if k not in ['n', 'y']:
continue
if k == 'n':
raise KeyboardInterrupt("Execution aborted") | Provide some information to the user on the estimated resource
usage (primarily memory) for this run. |
def _gen_vol_xml(vmname,
diskname,
disktype,
size,
pool):
'''
Generate the XML string to define a libvirt storage volume
'''
size = int(size) * 1024 # MB
context = {
'name': vmname,
'filename': '{0}.{1}'.format(diskname, disktype),
'volname': diskname,
'disktype': disktype,
'size': six.text_type(size),
'pool': pool,
}
fn_ = 'libvirt_volume.jinja'
try:
template = JINJA.get_template(fn_)
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template %s', fn_)
return ''
return template.render(**context) | Generate the XML string to define a libvirt storage volume |
def deleteByteArray(self, context, page, returnError):
"""please override"""
returnError.contents.value = self.IllegalStateError
raise NotImplementedError("You must override this method.") | please override |
def unwrap(s, node_indent):
"""Group lines of a docstring to blocks.
For now, only groups markdown list sections.
    A block designates a list of consecutive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
            JWT, encoded using a URL safe flavor of Base64.
"""
def get_indent():
if line_str.startswith('"""'):
return node_indent
return len(re.match(r"^( *)", line_str).group(1))
def finish_block():
if block_list:
unwrap_list.append(
(block_indent, (" ".join([v.strip() for v in block_list])).strip())
)
block_list.clear()
unwrap_list = []
block_indent = None
block_list = []
for line_str in s.splitlines():
line_str = line_str.rstrip()
line_indent = get_indent()
# A new block has been started. Record the indent of the first line in that
# block to use as the indent for all the lines that will be put in this block.
if not block_list:
block_indent = line_indent
# A blank line always starts a new block.
if line_str == "":
finish_block()
        # Indent any lines that are less indented than the docstr node
# if line_indent < node_indent:
# line_indent = block_indent
# A line that is indented less than the current block starts a new block.
if line_indent < block_indent:
finish_block()
# A line that is the start of a markdown list starts a new block.
elif line_str.strip().startswith(("- ", "* ")):
finish_block()
# A markdown title always starts a new block.
elif line_str.strip().endswith(":"):
finish_block()
block_list.append(line_str)
# Only make blocks for markdown list items. Write everything else as single line items.
if not block_list[0].strip().startswith(("- ", "* ")):
finish_block()
# Finish the block that was in progress when the end of the docstring was reached.
finish_block()
return unwrap_list | Group lines of a docstring to blocks.
For now, only groups markdown list sections.
A block designates a list of consecutive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
        JWT, encoded using a URL safe flavor of Base64.
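A short usage sketch for unwrap(); the input mirrors the docstring's own three-block example and assumes the function is importable.
doc = """Args:
    jwt_bu64: bytes
        JWT, encoded using a URL safe flavor of Base64.
"""
for indent, text in unwrap(doc, node_indent=0):
    print(indent, repr(text))
# Prints three (indent, text) blocks, matching the docstring's own example.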
def writeto(fpath, to_write, aslines=False, verbose=None):
r"""
Writes (utf8) text to a file.
Args:
fpath (PathLike): file path
to_write (str): text to write (must be unicode text)
aslines (bool): if True to_write is assumed to be a list of lines
verbose (bool): verbosity flag
CommandLine:
python -m ubelt.util_io writeto --verbose
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_cache_dir('ubelt')
>>> fpath = dpath + '/' + 'testwrite.txt'
>>> if exists(fpath):
>>> os.remove(fpath)
>>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
>>> writeto(fpath, to_write)
>>> read_ = ub.readfrom(fpath)
>>> print('read_ = ' + read_)
>>> print('to_write = ' + to_write)
>>> assert read_ == to_write
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_cache_dir('ubelt')
>>> fpath = dpath + '/' + 'testwrite2.txt'
>>> if exists(fpath):
>>> os.remove(fpath)
>>> to_write = ['a\n', 'b\n', 'c\n', 'd\n']
>>> writeto(fpath, to_write, aslines=True)
>>> read_ = ub.readfrom(fpath, aslines=True)
>>> print('read_ = {}'.format(read_))
>>> print('to_write = {}'.format(to_write))
>>> assert read_ == to_write
"""
if verbose:
print('Writing to text file: %r ' % (fpath,))
with open(fpath, 'wb') as file:
if aslines:
to_write = map(_ensure_bytes , to_write)
file.writelines(to_write)
else:
# convert to bytes for writing
bytes = _ensure_bytes(to_write)
file.write(bytes) | r"""
Writes (utf8) text to a file.
Args:
fpath (PathLike): file path
to_write (str): text to write (must be unicode text)
aslines (bool): if True to_write is assumed to be a list of lines
verbose (bool): verbosity flag
CommandLine:
python -m ubelt.util_io writeto --verbose
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_cache_dir('ubelt')
>>> fpath = dpath + '/' + 'testwrite.txt'
>>> if exists(fpath):
>>> os.remove(fpath)
>>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
>>> writeto(fpath, to_write)
>>> read_ = ub.readfrom(fpath)
>>> print('read_ = ' + read_)
>>> print('to_write = ' + to_write)
>>> assert read_ == to_write
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_cache_dir('ubelt')
>>> fpath = dpath + '/' + 'testwrite2.txt'
>>> if exists(fpath):
>>> os.remove(fpath)
>>> to_write = ['a\n', 'b\n', 'c\n', 'd\n']
>>> writeto(fpath, to_write, aslines=True)
>>> read_ = ub.readfrom(fpath, aslines=True)
>>> print('read_ = {}'.format(read_))
>>> print('to_write = {}'.format(to_write))
>>> assert read_ == to_write |
def compute_hkdf(ikm, salt):
"""
Standard hkdf algorithm
:param {Buffer} ikm Input key material.
:param {Buffer} salt Salt value.
:return {Buffer} Strong key material.
@private
"""
prk = hmac.new(salt, ikm, hashlib.sha256).digest()
info_bits_update = info_bits + bytearray(chr(1), 'utf-8')
hmac_hash = hmac.new(prk, info_bits_update, hashlib.sha256).digest()
return hmac_hash[:16] | Standard hkdf algorithm
:param {Buffer} ikm Input key material.
:param {Buffer} salt Salt value.
:return {Buffer} Strong key material.
@private |
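A hedged usage sketch; compute_hkdf relies on a module-level info_bits constant defined elsewhere in its module, so it has to be called through that module.
import os

key = compute_hkdf(ikm=os.urandom(32), salt=os.urandom(16))
assert len(key) == 16   # the function truncates the HMAC output to 16 bytes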
def fromJavascript(cls, javascript_datetime):
''' a method to construct labDT from a javascript datetime string
:param javascript_datetime: string with datetime info in javascript formatting
:return: labDT object
'''
# validate inputs
title = 'Javascript datetime string input for labDT.fromJavascript'
jsDTpattern = re.compile('\s*\(.*\)$')
jsGMTpattern = re.compile('GMT[-\+]\d{4}')
if not jsGMTpattern.findall(javascript_datetime):
raise Exception('\n%s must have a GMT timezone adjustment.' % title)
# construct datetime from javascript string
adj_input = jsDTpattern.sub('', javascript_datetime)
if '-' in adj_input:
adj_input = adj_input.replace('-','+')
elif '+' in adj_input:
adj_input = adj_input.replace('+','-')
python_datetime = dTparser.parse(adj_input)
dT = python_datetime.astimezone(pytz.utc)
dt_kwargs = {
'year': dT.year,
'month': dT.month,
'day': dT.day,
'hour': dT.hour,
'minute': dT.minute,
'second': dT.second,
'microsecond': dT.microsecond,
'tzinfo': dT.tzinfo
}
return labDT(**dt_kwargs) | a method to construct labDT from a javascript datetime string
:param javascript_datetime: string with datetime info in javascript formatting
:return: labDT object |
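A usage sketch (the example string mimics JavaScript's Date.toString() output; the import path for labDT is an assumption):
js_string = 'Fri Apr 13 2018 19:56:28 GMT-0400 (Eastern Daylight Time)'
dt = labDT.fromJavascript(js_string)
print(dt)   # UTC-normalised datetime built from the parsed components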
def find_win32_generator():
"""
Find a suitable cmake "generator" under Windows.
"""
# XXX this assumes we will find a generator that's the same, or
# compatible with, the one which was used to compile LLVM... cmake
# seems a bit lacking here.
cmake_dir = os.path.join(here_dir, 'dummy')
# LLVM 4.0+ needs VS 2015 minimum.
generators = []
if os.environ.get("CMAKE_GENERATOR"):
generators.append(os.environ.get("CMAKE_GENERATOR"))
# Drop generators that are too old
vspat = re.compile(r'Visual Studio (\d+)')
def drop_old_vs(g):
m = vspat.match(g)
if m is None:
return True # keep those we don't recognize
ver = int(m.group(1))
return ver >= 14
generators = list(filter(drop_old_vs, generators))
generators.append('Visual Studio 14 2015' + (' Win64' if is_64bit else ''))
for generator in generators:
build_dir = tempfile.mkdtemp()
print("Trying generator %r" % (generator,))
try:
try_cmake(cmake_dir, build_dir, generator)
except subprocess.CalledProcessError:
continue
else:
# Success
return generator
finally:
shutil.rmtree(build_dir)
raise RuntimeError("No compatible cmake generator installed on this machine") | Find a suitable cmake "generator" under Windows. |
def walker(self, path=None, base_folder=None):
"""
        This method walks a directory structure and creates the
Folders and Files as they appear.
"""
path = path or self.path or ''
base_folder = base_folder or self.base_folder
# prevent trailing slashes and other inconsistencies on path.
path = os.path.normpath(upath(path))
if base_folder:
base_folder = os.path.normpath(upath(base_folder))
print("The directory structure will be imported in %s" % (base_folder,))
if self.verbosity >= 1:
print("Import the folders and files in %s" % (path,))
root_folder_name = os.path.basename(path)
for root, dirs, files in os.walk(path):
rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)
while '' in rel_folders:
rel_folders.remove('')
if base_folder:
folder_names = base_folder.split('/') + [root_folder_name] + rel_folders
else:
folder_names = [root_folder_name] + rel_folders
folder = self.get_or_create_folder(folder_names)
for file_obj in files:
dj_file = DjangoFile(open(os.path.join(root, file_obj), mode='rb'),
name=file_obj)
self.import_file(file_obj=dj_file, folder=folder)
if self.verbosity >= 1:
            print(('folder_created #%s / file_created #%s / ' + 'image_created #%s') % (self.folder_created, self.file_created, self.image_created)) | This method walks a directory structure and creates the
Folders and Files as they appear. |
def list_sinks(self, project, page_size=0, page_token=None):
"""List sinks for the project associated with this client.
:type project: str
:param project: ID of the project whose sinks are to be listed.
:type page_size: int
        :param page_size: maximum number of sinks to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of mappings, plus a "next page token" string:
if not None, indicates that more sinks can be retrieved
with another call (pass that value as ``page_token``).
"""
path = "projects/%s" % (project,)
page_iter = self._gapic_api.list_sinks(path, page_size=page_size)
page_iter.client = self._client
page_iter.next_page_token = page_token
page_iter.item_to_value = _item_to_sink
return page_iter | List sinks for the project associated with this client.
:type project: str
:param project: ID of the project whose sinks are to be listed.
:type page_size: int
:param page_size: maximum number of sinks to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of mappings, plus a "next page token" string:
if not None, indicates that more sinks can be retrieved
with another call (pass that value as ``page_token``). |
def plot(self, data):
""" Plots an overview in a list of dataframes
Args:
data: a dictionary with key the name, and value the dataframe.
"""
import IPython
if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):
raise ValueError('Expect a dictionary where the values are all dataframes.')
gfsg = GenericFeatureStatisticsGenerator()
data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]
data_proto = gfsg.ProtoFromDataFrames(data)
protostr = base64.b64encode(data_proto.SerializeToString()).decode("utf-8")
html_id = 'f' + datalab.utils.commands.Html.next_id()
HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html" >
<facets-overview id="{html_id}"></facets-overview>
<script>
document.querySelector("#{html_id}").protoInput = "{protostr}";
</script>"""
html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)
return IPython.core.display.HTML(html) | Plots an overview in a list of dataframes
Args:
data: a dictionary with key the name, and value the dataframe. |
def branches(self):
# type: () -> List[str]
""" List of all branches this commit is a part of. """
if self._branches is None:
cmd = 'git branch --contains {}'.format(self.sha1)
out = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip()
self._branches = [x.strip('* \t\n') for x in out.splitlines()]
return self._branches | List of all branches this commit is a part of. |
def _spawn_background_rendering(self, rate=5.0):
"""
Spawns a thread that updates the render window.
Sometimes directly modifiying object data doesn't trigger
Modified() and upstream objects won't be updated. This
ensures the render window stays updated without consuming too
many resources.
"""
self.render_trigger.connect(self.ren_win.Render)
twait = rate**-1
def render():
while self.active:
time.sleep(twait)
self._render()
self.render_thread = Thread(target=render)
self.render_thread.start() | Spawns a thread that updates the render window.
Sometimes directly modifying object data doesn't trigger
Modified() and upstream objects won't be updated. This
ensures the render window stays updated without consuming too
many resources. |
def delete(self, user, commit=True):
""" Delete a user """
events.user_delete_event.send(user)
return super().delete(user, commit) | Delete a user |
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale.values)
foo = np.sqrt(3.)/lengthscale
F = np.array([[0, 1], [-foo**2, -2*foo]])
L = np.array([[0], [1]])
Qc = np.array([[12.*np.sqrt(3) / lengthscale**3 * variance]])
H = np.array([[1, 0]])
Pinf = np.array([[variance, 0],
[0, 3.*variance/(lengthscale**2)]])
# Allocate space for the derivatives
dF = np.empty([F.shape[0],F.shape[1],2])
dQc = np.empty([Qc.shape[0],Qc.shape[1],2])
dPinf = np.empty([Pinf.shape[0],Pinf.shape[1],2])
# The partial derivatives
dFvariance = np.zeros([2,2])
dFlengthscale = np.array([[0,0],
[6./lengthscale**3,2*np.sqrt(3)/lengthscale**2]])
dQcvariance = np.array([12.*np.sqrt(3)/lengthscale**3])
dQclengthscale = np.array([-3*12*np.sqrt(3)/lengthscale**4*variance])
dPinfvariance = np.array([[1,0],[0,3./lengthscale**2]])
dPinflengthscale = np.array([[0,0],
[0,-6*variance/lengthscale**3]])
# Combine the derivatives
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinfvariance
dPinf[:,:,1] = dPinflengthscale
return (F, L, Qc, H, Pinf, dF, dQc, dPinf) | Return the state space representation of the covariance. |
def train(cls, rdd, k, maxIterations=100, initMode="random"):
r"""
:param rdd:
An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
affinity matrix, which is the matrix A in the PIC paper. The
similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
(j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
because it is assumed s\ :sub:`ij`\ = 0.0.
:param k:
Number of clusters.
:param maxIterations:
Maximum number of iterations of the PIC algorithm.
(default: 100)
:param initMode:
Initialization mode. This can be either "random" to use
a random vector as vertex properties, or "degree" to use
normalized sum similarities.
(default: "random")
"""
model = callMLlibFunc("trainPowerIterationClusteringModel",
rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
return PowerIterationClusteringModel(model) | r"""
:param rdd:
An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
affinity matrix, which is the matrix A in the PIC paper. The
similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
(j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
because it is assumed s\ :sub:`ij`\ = 0.0.
:param k:
Number of clusters.
:param maxIterations:
Maximum number of iterations of the PIC algorithm.
(default: 100)
:param initMode:
Initialization mode. This can be either "random" to use
a random vector as vertex properties, or "degree" to use
normalized sum similarities.
(default: "random") |
def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
"""
:param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
if max_bytes:
cmd = cmd + ['max_bytes', str(max_bytes)]
if max_objects:
cmd = cmd + ['max_objects', str(max_objects)]
check_call(cmd) | :param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError |
def idle_task(self):
'''called on idle'''
if self.threat_timeout_timer.trigger():
self.check_threat_timeout()
if self.threat_detection_timer.trigger():
self.perform_threat_detection() | called on idle |
def props_to_image(regionprops, shape, prop):
r"""
    Creates an image with each region colored according to the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
        by PoreSpy's ``regionprops_3D`` or Skimage's ``regionprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d
"""
im = sp.zeros(shape=shape)
for r in regionprops:
if prop == 'convex':
mask = r.convex_image
else:
mask = r.image
temp = mask * r[prop]
s = bbox_to_slices(r.bbox)
im[s] += temp
return im | r"""
Creates an image with each region colored according to the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
    by PoreSpy's ``regionprops_3D`` or Skimage's ``regionprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d |
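A hedged usage sketch; the porespy module paths used here (generators.blobs, metrics.regionprops_3D, metrics.props_to_image) are assumptions; only props_to_image itself comes from the snippet above.
import scipy.ndimage as spim
import porespy as ps

im = ps.generators.blobs(shape=[50, 50, 50])        # synthetic binary image (assumed generator)
regions = spim.label(im)[0]                         # labelled regions
props = ps.metrics.regionprops_3D(regions)          # assumed location of regionprops_3D
vol_map = ps.metrics.props_to_image(props, shape=im.shape, prop='volume')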