text (stringlengths 78–104k) | score (float64, 0–0.18) |
---|---|
def titles(self, unique=False):
"""Return a list of all available spreadsheet titles.
Args:
unique (bool): drop duplicates
Returns:
list: list of title/name strings
"""
if unique:
return tools.uniqued(title for _, title in self.iterfiles())
return [title for _, title in self.iterfiles()] | 0.005362 |
def count_unique_mapped_reads(self, file_name, paired_end):
"""
        For a bam or sam file with paired or single-end reads, returns the
number of mapped reads, counting each read only once, even if it appears
mapped at multiple locations.
:param str file_name: name of reads file
:param bool paired_end: True/False paired end data
:return int: Number of uniquely mapped reads.
"""
_, ext = os.path.splitext(file_name)
ext = ext.lower()
if ext == ".sam":
param = "-S -F4"
        elif ext == ".bam":
param = "-F4"
else:
raise ValueError("Not a SAM or BAM: '{}'".format(file_name))
if paired_end:
r1 = self.samtools_view(file_name, param=param + " -f64", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = self.samtools_view(file_name, param=param + " -f128", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
else:
r1 = self.samtools_view(file_name, param=param + "", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = 0
return int(r1) + int(r2) | 0.005659 |
def get_collections(self, ignore=None):
"""Return the collections matching the given `_ignore` value
Parameters
----------
ignore : `bool`, or `None`
value of `_ignore` to match
Returns
-------
collections : `list`
if `ignore=None`, simply returns all collections, otherwise
returns those collections matching the `ignore` parameter
"""
if ignore is None:
return self.collections
return [c for c in self.collections if
getattr(c, '_ignore', None) == ignore] | 0.003317 |
def coherence(self, other, fftlength=None, overlap=None,
window='hann', **kwargs):
"""Calculate the frequency-coherence between this `TimeSeries`
and another.
Parameters
----------
other : `TimeSeries`
`TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
number of seconds in single FFT, defaults to a single FFT
covering the full duration
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
**kwargs
any other keyword arguments accepted by
:func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``,
            and ``noverlap`` which are superseded by the above keyword
arguments
Returns
-------
coherence : `~gwpy.frequencyseries.FrequencySeries`
the coherence `FrequencySeries` of this `TimeSeries`
with the other
Notes
-----
        If `self` and `other` have different
:attr:`TimeSeries.sample_rate` values, the higher sampled
`TimeSeries` will be down-sampled to match the lower.
See Also
--------
:func:`matplotlib.mlab.cohere`
for details of the coherence calculator
"""
from matplotlib import mlab
from ..frequencyseries import FrequencySeries
# check sampling rates
if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'):
sampling = min(self.sample_rate.value, other.sample_rate.value)
# resample higher rate series
if self.sample_rate.value == sampling:
other = other.resample(sampling)
self_ = self
else:
self_ = self.resample(sampling)
else:
sampling = self.sample_rate.value
self_ = self
# check fft lengths
if overlap is None:
overlap = 0
else:
overlap = int((overlap * self_.sample_rate).decompose().value)
if fftlength is None:
fftlength = int(self_.size/2. + overlap/2.)
else:
fftlength = int((fftlength * self_.sample_rate).decompose().value)
if window is not None:
kwargs['window'] = signal.get_window(window, fftlength)
coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength,
Fs=sampling, noverlap=overlap, **kwargs)
out = coh.view(FrequencySeries)
out.xindex = freqs
out.epoch = self.epoch
out.name = 'Coherence between %s and %s' % (self.name, other.name)
out.unit = 'coherence'
return out | 0.00099 |
def _lnqmed_residual(catchment):
"""
Return ln(QMED) model error at a gauged catchment
:param catchment: Gauged catchment
:type catchment: :class:`Catchment`
:return: Model error
:rtype: float
"""
analysis = QmedAnalysis(catchment, year=2000) # Probably should set the year to the midpoint of amax rec.
logmedian_amax = log(analysis.qmed(method='amax_records'))
logmedian_descr = log(analysis.qmed(method='descriptors'))
return logmedian_amax - logmedian_descr | 0.005455 |
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg) | 0.002174 |
def user_activities(self, user_id, extra_query_params={}):
"""
client = BacklogClient("your_space_name", "your_api_key")
client.user_activities(3)
client.user_activities(3, {"count": 2, "order": "asc"})
"""
return self.do("GET", "users/{user_id}/activities",
url_params={"user_id": user_id},
query_params=extra_query_params) | 0.004785 |
def insert(self, **kwargs):
"""
Insert commands at the beginning of the sequence.
This is provided because certain commands
        have to come first (such as user creation), but may need to be added
after other commands have already been specified.
Later calls to insert put their commands before those in the earlier calls.
Also, since the order of iterated kwargs is not guaranteed (in Python 2.x),
you should really only call insert with one keyword at a time. See the doc of append
for more details.
:param kwargs: the key/value pair to append first
:return: the action, so you can append Action(...).insert(...).append(...)
"""
for k, v in six.iteritems(kwargs):
self.commands.insert(0, {k: v})
return self | 0.007194 |
def do_work(self):
""" Do work """
self._starttime = time.time()
if not os.path.isdir(self._dir2):
if self._maketarget:
if self._verbose:
self.log('Creating directory %s' % self._dir2)
try:
os.makedirs(self._dir2)
self._numnewdirs += 1
except Exception as e:
self.log(str(e))
return None
# All right!
self._mainfunc()
self._endtime = time.time() | 0.003597 |
def xcorr(x, y=None, maxlags=None, norm='biased'):
"""Cross-correlation using numpy.correlate
Estimates the cross-correlation (and autocorrelation) sequence of a random
process of length N. By default, there is no normalisation and the output
    sequence of the cross-correlation has a length 2*N-1.
:param array x: first data array of length N
:param array y: second data array of length N. If not specified, computes the
autocorrelation.
:param int maxlags: compute cross correlation between [-maxlags:maxlags]
when maxlags is not specified, the range of lags is [-N+1:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']
The true cross-correlation sequence is
.. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])
However, in practice, only a finite segment of one realization of the
infinite-length random process is available.
The correlation is estimated using numpy.correlate(x,y,'full').
Normalisation is handled by this function using the following cases:
* 'biased': Biased estimate of the cross-correlation function
* 'unbiased': Unbiased estimate of the cross-correlation function
    * 'coeff': Normalizes the sequence so that the autocorrelation at zero
      lag is 1.0.
:return:
* a numpy.array containing the cross-correlation sequence (length 2*N-1)
* lags vector
.. note:: If x and y are not the same length, the shorter vector is
zero-padded to the length of the longer vector.
.. rubric:: Examples
.. doctest::
>>> from spectrum import xcorr
>>> x = [1,2,3,4,5]
>>> c, l = xcorr(x,x, maxlags=0, norm='biased')
>>> c
array([ 11.])
.. seealso:: :func:`CORRELATION`.
"""
N = len(x)
if y is None:
y = x
assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'
if maxlags is None:
maxlags = N-1
lags = np.arange(0, 2*N-1)
else:
assert maxlags <= N, 'maxlags must be less than data length'
lags = np.arange(N-maxlags-1, N+maxlags)
res = np.correlate(x, y, mode='full')
if norm == 'biased':
Nf = float(N)
res = res[lags] / float(N) # do not use /= !!
elif norm == 'unbiased':
res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags]
elif norm == 'coeff':
Nf = float(N)
rms = pylab_rms_flat(x) * pylab_rms_flat(y)
res = res[lags] / rms / Nf
else:
res = res[lags]
lags = np.arange(-maxlags, maxlags+1)
return res, lags | 0.001521 |
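As a quick sanity check of the doctest above (an illustration, not part of the library): under 'biased' normalisation the zero-lag value is the biased autocorrelation estimate, sum(x[n]*x[n])/N.
import numpy as np
x = np.array([1, 2, 3, 4, 5], dtype=float)
r0_biased = np.dot(x, x) / len(x)   # (1 + 4 + 9 + 16 + 25) / 5 = 11.0, matching array([ 11.])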
def _writeMzmlChecksum(xmlWriter, outputFile):
""" #TODO: docstring
:param xmlWriter: #TODO: docstring
:param outputFile: #TODO: docstring
"""
sha = hashlib.sha1(outputFile.getvalue())
sha.update('<fileChecksum>')
xmlChecksumElement = ETREE.Element('fileChecksum')
xmlChecksumElement.text = sha.hexdigest()
xmlWriter.write(xmlChecksumElement, pretty_print=True) | 0.0025 |
def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref. This operates similarly to
    the roots file server but with assumptions of the directory structure
based on svn standard practices.
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
for repo in init():
env_root = _env_root(repo, tgt_env)
if env_root is None:
# Environment not found, try the next repo
continue
if repo['mountpoint'] \
and not path.startswith(repo['mountpoint'] + os.path.sep):
continue
repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
if repo['root']:
repo_path = os.path.join(repo['root'], repo_path)
full = os.path.join(env_root, repo_path)
if os.path.isfile(full):
fnd['rel'] = path
fnd['path'] = full
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
# 0 => st_mode=33188
# 1 => st_ino=10227377
# 2 => st_dev=65026
# 3 => st_nlink=1
# 4 => st_uid=1000
# 5 => st_gid=1000
# 6 => st_size=1056233
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
fnd['stat'] = list(os.stat(full))
except Exception:
pass
return fnd
return fnd | 0.000594 |
def get_default_section(self, file_name):
"""Returns first non-DEFAULT section; falls back to DEFAULT."""
if not os.path.isfile(file_name):
return 'DEFAULT'
parser = self.make_parser()
with open(file_name) as fp:
parser.read_file(fp)
sections = parser.sections()
section = sections[0] if len(sections) > 0 else 'DEFAULT'
return section | 0.004819 |
def load_schema(schema_path):
"""Prepare the api specification for request and response validation.
:returns: a mapping from :class:`RequestMatcher` to :class:`ValidatorMap`
for every operation in the api specification.
:rtype: dict
"""
with open(schema_path, 'r') as schema_file:
schema = simplejson.load(schema_file)
resolver = RefResolver('', '', schema.get('models', {}))
return build_request_to_validator_map(schema, resolver) | 0.002101 |
def bulk_refresh(self):
"""
Refreshes all refreshable tokens in the queryset.
Deletes any tokens which fail to refresh.
Deletes any tokens which are expired and cannot refresh.
Excludes tokens for which the refresh was incomplete for other reasons.
"""
session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID)
auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET)
incomplete = []
for model in self.filter(refresh_token__isnull=False):
try:
model.refresh(session=session, auth=auth)
                logger.debug("Successfully refreshed {0}".format(repr(model)))
except TokenError:
logger.info("Refresh failed for {0}. Deleting.".format(repr(model)))
model.delete()
except IncompleteResponseError:
incomplete.append(model.pk)
self.filter(refresh_token__isnull=True).get_expired().delete()
return self.exclude(pk__in=incomplete) | 0.003738 |
def _search_generator(self, item: Any) -> Generator[Any, None, None]:
"""A helper method for `self.search` that returns a generator rather than a list."""
results = 0
for x in self.enumerate(item):
yield x
results += 1
if results == 0:
raise SearchError(str(item)) | 0.009036 |
def save(self) -> None:
"""
Save the training trace to :py:attr:`CXF_TRACE_FILE` file under the specified directory.
:raise ValueError: if no output directory was specified
"""
if self._output_dir is None:
raise ValueError('Can not save TrainingTrace without output dir.')
yaml_to_file(self._trace, self._output_dir, CXF_TRACE_FILE) | 0.007634 |
def add_user(
self, user,
first_name=None, last_name=None,
email=None, password=None
):
"""
Add a new user.
Args:
user (string): User name.
first_name (optional[string]): User's first name. Defaults to None.
last_name (optional[string]): User's last name. Defaults to None.
email: (optional[string]): User's email address. Defaults to None.
password: (optional[string]): User's password. Defaults to None.
Raises:
requests.HTTPError on failure.
"""
self.project_service.set_auth(self._token_project)
self.project_service.add_user(
user, first_name, last_name, email, password) | 0.003916 |
def get(self, sid):
"""
Constructs a ConferenceContext
:param sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.conference.ConferenceContext
:rtype: twilio.rest.api.v2010.account.conference.ConferenceContext
"""
return ConferenceContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | 0.007389 |
def set_autosession(self, value=None):
"""
Turn autosession (automatic committing after each modification call) on/off.
If value is None, only query the current value (don't change anything).
"""
if value is not None:
self.rollback()
self.autosession = value
return self.autosession | 0.008475 |
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output | 0.004153 |
def hill_i(self,x,threshold=0.1,power=2):
""" Inhibiting hill function.
Is equivalent to 1-hill_a(self,x,power,threshold).
"""
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return threshold_pow / (x_pow + threshold_pow) | 0.02381 |
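For orientation, a small numeric check of the formula above, written as plain arithmetic rather than a call to the method: at x equal to the threshold the inhibiting Hill function returns 0.5, approaching 1 as x goes to 0 and 0 as x grows.
threshold, power = 0.1, 2
x = threshold
value = threshold**power / (x**power + threshold**power)   # 0.5 when x == threshold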
def preload_tops(meta):
"""Load all topology files into memory.
This might save some performance compared to re-parsing the topology
file for each trajectory you try to load in. Typically, you have far
    fewer (possibly 1) topologies than trajectories.
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata with a column named 'top_fn'
Returns
-------
tops : dict
Dictionary of ``md.Topology`` objects, keyed by "top_fn"
values.
"""
top_fns = set(meta['top_fn'])
tops = {}
for tfn in top_fns:
tops[tfn] = md.load_topology(tfn)
return tops | 0.001553 |
def send_email_from_template(to_email, from_email, subject,
markdown_template=None,
text_template=None, html_template=None,
fail_silently=False, context=None,
**kwargs):
"""Send an email from a template.
:param to_email: the email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates
"""
return send_emails_from_template(
to_emails=[to_email],
from_email=from_email,
subject=subject,
markdown_template=markdown_template,
text_template=text_template,
html_template=html_template,
fail_silently=fail_silently,
context=context,
**kwargs
) | 0.000695 |
def QA_indicator_BIAS(DataFrame, N1, N2, N3):
    'Bias Ratio (乖离率)'
CLOSE = DataFrame['close']
BIAS1 = (CLOSE - MA(CLOSE, N1)) / MA(CLOSE, N1) * 100
BIAS2 = (CLOSE - MA(CLOSE, N2)) / MA(CLOSE, N2) * 100
BIAS3 = (CLOSE - MA(CLOSE, N3)) / MA(CLOSE, N3) * 100
DICT = {'BIAS1': BIAS1, 'BIAS2': BIAS2, 'BIAS3': BIAS3}
return pd.DataFrame(DICT) | 0.002849 |
def _config(name, conf, default=None):
'''
Return a value for 'name' from the config file options. If the 'name' is
not in the config, the 'default' value is returned. This method converts
unicode values to str type under python 2.
'''
try:
value = conf[name]
except KeyError:
value = default
return salt.utils.data.decode(value, to_str=True) | 0.002564 |
def remove(self, key):
"""
Remove the data stored for the given key.
Args:
key (str): Key of the data to remove.
Note:
The container has to be opened in advance.
"""
self.raise_error_if_not_open()
if key in self._file:
del self._file[key] | 0.006006 |
def smeft_evolve_continuous(C_in, scale_in, scale_out, newphys=True, **kwargs):
"""Solve the SMEFT RGEs by numeric integration, returning a function that
    allows computing an interpolated solution at arbitrary intermediate
scales."""
sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys,
dense_output=True, **kwargs)
@np.vectorize
def _rge_solution(scale):
t = log(scale)
y = sol.sol(t).view(complex)
yd = C_array2dict(y)
yw = arrays2wcxf_nonred(yd)
return yw
def rge_solution(scale):
# this is to return a scalar if the input is scalar
return _rge_solution(scale)[()]
return rge_solution | 0.004225 |
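A usage sketch with hypothetical scales (the Wilson-coefficient dictionary C_in and any extra keyword arguments come from the caller):
# sol = smeft_evolve_continuous(C_in, scale_in=1e4, scale_out=160.0)
# wc_at_1tev = sol(1000.0)                    # interpolated output at an intermediate scale
# wc_grid = [sol(s) for s in (5e3, 1e3, 2e2)]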
def _POUpdateBuilderWrapper(env, target=None, source=_null, **kw):
""" Wrapper for `POUpdate` builder - make user's life easier """
if source is _null:
if 'POTDOMAIN' in kw:
domain = kw['POTDOMAIN']
elif 'POTDOMAIN' in env and env['POTDOMAIN']:
domain = env['POTDOMAIN']
else:
domain = 'messages'
source = [ domain ] # NOTE: Suffix shall be appended automatically
return env._POUpdateBuilder(target, source, **kw) | 0.022075 |
def mutate(self, mutation=None, set_obj=None, del_obj=None, set_nquads=None,
del_nquads=None, commit_now=None, ignore_index_conflict=None,
timeout=None, metadata=None, credentials=None):
"""Adds a mutate operation to the transaction."""
mutation = self._common_mutate(
mutation=mutation, set_obj=set_obj, del_obj=del_obj,
set_nquads=set_nquads, del_nquads=del_nquads,
commit_now=commit_now, ignore_index_conflict=ignore_index_conflict)
new_metadata = self._dg.add_login_metadata(metadata)
mutate_error = None
try:
assigned = self._dc.mutate(mutation, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
if util.is_jwt_expired(error):
self._dg.retry_login()
new_metadata = self._dg.add_login_metadata(metadata)
try:
assigned = self._dc.mutate(mutation, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
mutate_error = error
else:
mutate_error = error
if mutate_error is not None:
try:
self.discard(timeout=timeout, metadata=metadata,
credentials=credentials)
except:
# Ignore error - user should see the original error.
pass
self._common_except_mutate(mutate_error)
if mutation.commit_now:
self._finished = True
self.merge_context(assigned.context)
return assigned | 0.002703 |
def old_status(self, old_status):
"""
Sets the old_status of this BuildSetStatusChangedEvent.
:param old_status: The old_status of this BuildSetStatusChangedEvent.
:type: str
"""
allowed_values = ["NEW", "DONE", "REJECTED"]
if old_status not in allowed_values:
raise ValueError(
"Invalid value for `old_status` ({0}), must be one of {1}"
.format(old_status, allowed_values)
)
self._old_status = old_status | 0.003795 |
def make_copy(cls, generator):
"""
Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator
"""
return from_commandline(
to_commandline(generator), classname=classes.get_classname(DataGenerator())) | 0.007937 |
def validate_description(xml_data):
""" Validate the description for validity """
try:
root = ET.fromstring('<document>' + xml_data + '</document>')
except StdlibParseError as e:
raise ParseError(str(e))
return _parse_desc(root) | 0.003846 |
def java_version():
"""Call java and return version information.
:return unicode: Java version string
"""
result = subprocess.check_output(
[c.JAVA, '-version'], stderr=subprocess.STDOUT
)
first_line = result.splitlines()[0]
return first_line.decode() | 0.003472 |
def subs_consts(self, expr):
"""Substitute constants in expression unless it is already a number."""
if isinstance(expr, numbers.Number):
return expr
else:
return expr.subs(self.constants) | 0.008475 |
def add_contact(self, contact_id, scope='contact/invite'):
"""
Add a contact
contact_id can either be the mxit ID of a service or a Mxit user
User authentication required with the following scope: 'contact/invite'
"""
return _put(
token=self.oauth.get_user_token(scope),
uri='/user/socialgraph/contact/' + urllib.quote(contact_id)
) | 0.004854 |
def parents(self, vertex):
"""
Return the list of immediate parents of this vertex.
"""
return [self.tail(edge) for edge in self.in_edges(vertex)] | 0.011173 |
def secure_authorized_channel(
credentials, target, ssl_credentials=None):
"""Creates a secure authorized gRPC channel."""
http_request = _request_factory()
return google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, target,
ssl_credentials=ssl_credentials) | 0.003115 |
def list_instances(show=1, name=None, group=None, release=None, except_release=None):
"""
Retrieves all virtual machines instances in the current environment.
"""
from burlap.common import shelf, OrderedDict, get_verbose
verbose = get_verbose()
require('vm_type', 'vm_group')
assert env.vm_type, 'No VM type specified.'
env.vm_type = (env.vm_type or '').lower()
_name = name
_group = group
_release = release
if verbose:
print('name=%s, group=%s, release=%s' % (_name, _group, _release))
env.vm_elastic_ip_mappings = shelf.get('vm_elastic_ip_mappings')
data = type(env)()
if env.vm_type == EC2:
if verbose:
print('Checking EC2...')
for instance in get_all_running_ec2_instances():
name = instance.tags.get(env.vm_name_tag)
group = instance.tags.get(env.vm_group_tag)
release = instance.tags.get(env.vm_release_tag)
if env.vm_group and env.vm_group != group:
if verbose:
print(('Skipping instance %s because its group "%s" '
'does not match env.vm_group "%s".') \
% (instance.public_dns_name, group, env.vm_group))
continue
if _group and group != _group:
if verbose:
print(('Skipping instance %s because its group "%s" '
'does not match local group "%s".') \
% (instance.public_dns_name, group, _group))
continue
if _name and name != _name:
if verbose:
print(('Skipping instance %s because its name "%s" '
'does not match name "%s".') \
% (instance.public_dns_name, name, _name))
continue
if _release and release != _release:
if verbose:
print(('Skipping instance %s because its release "%s" '
'does not match release "%s".') \
% (instance.public_dns_name, release, _release))
continue
if except_release and release == except_release:
continue
if verbose:
print('Adding instance %s (%s).' \
% (name, instance.public_dns_name))
data.setdefault(name, type(env)())
data[name]['id'] = instance.id
data[name]['public_dns_name'] = instance.public_dns_name
if verbose:
print('Public DNS: %s' % instance.public_dns_name)
if env.vm_elastic_ip_mappings and name in env.vm_elastic_ip_mappings:
data[name]['ip'] = env.vm_elastic_ip_mappings[name]
else:
data[name]['ip'] = socket.gethostbyname(instance.public_dns_name)
if int(show):
pprint(data, indent=4)
return data
elif env.vm_type == KVM:
#virsh list
pass
else:
raise NotImplementedError | 0.006159 |
def set_param(self, param, value, header='Content-Type', requote=True,
charset=None, language=''):
"""Set a parameter in the Content-Type header.
If the parameter already exists in the header, its value will be
replaced with the new value.
If header is Content-Type and has not yet been defined for this
message, it will be set to "text/plain" and the new parameter and
value will be appended as per RFC 2045.
        An alternate header can be specified in the header argument, and all
parameters will be quoted as necessary unless requote is False.
If charset is specified, the parameter will be encoded according to RFC
2231. Optional language specifies the RFC 2231 language, defaulting
to the empty string. Both charset and language should be strings.
"""
if not isinstance(value, tuple) and charset:
value = (charset, language, value)
if header not in self and header.lower() == 'content-type':
ctype = 'text/plain'
else:
ctype = self.get(header)
if not self.get_param(param, header=header):
if not ctype:
ctype = _formatparam(param, value, requote)
else:
ctype = SEMISPACE.join(
[ctype, _formatparam(param, value, requote)])
else:
ctype = ''
for old_param, old_value in self.get_params(header=header,
unquote=requote):
append_param = ''
if old_param.lower() == param.lower():
append_param = _formatparam(param, value, requote)
else:
append_param = _formatparam(old_param, old_value, requote)
if not ctype:
ctype = append_param
else:
ctype = SEMISPACE.join([ctype, append_param])
if ctype != self.get(header):
del self[header]
self[header] = ctype | 0.001437 |
def parent(self) -> Optional['CtsReference']:
""" Parent of the actual URN, for example, 1.1 for 1.1.1
:rtype: CtsReference
"""
if self.start.depth == 1 and (self.end is None or self.end.depth <= 1):
return None
else:
if self.start.depth > 1 and (self.end is None or self.end.depth == 0):
return CtsReference("{0}{1}".format(
".".join(self.start.list[:-1]),
self.start.subreference or ""
))
elif self.start.depth > 1 and self.end is not None and self.end.depth > 1:
_start = self.start.list[0:-1]
_end = self.end.list[0:-1]
if _start == _end and \
self.start.subreference is None and \
self.end.subreference is None:
return CtsReference(
".".join(_start)
)
else:
return CtsReference("{0}{1}-{2}{3}".format(
".".join(_start),
self.start.subreference or "",
".".join(_end),
self.end.subreference or ""
)) | 0.003167 |
def set_publication_failure(cursor, exc):
"""Given a publication exception, set the publication as failed and
append the failure message to the publication record.
"""
publication_id = exc.publication_id
if publication_id is None:
raise ValueError("Exception must have a ``publication_id`` value.")
cursor.execute("""\
SELECT "state_messages"
FROM publications
WHERE id = %s""", (publication_id,))
state_messages = cursor.fetchone()[0]
if state_messages is None:
state_messages = []
entry = exc.__dict__
entry['message'] = exc.message
state_messages.append(entry)
state_messages = json.dumps(state_messages)
cursor.execute("""\
UPDATE publications SET ("state", "state_messages") = (%s, %s)
WHERE id = %s""", ('Failed/Error', state_messages, publication_id,)) | 0.001209 |
def export(group, bucket, prefix, start, end, role, poll_period=120,
session=None, name="", region=None):
"""export a given log group to s3"""
start = start and isinstance(start, six.string_types) and parse(start) or start
    end = (end and isinstance(end, six.string_types) and
parse(end) or end or datetime.now())
start = start.replace(tzinfo=tzlocal()).astimezone(tzutc())
end = end.replace(tzinfo=tzlocal()).astimezone(tzutc())
if session is None:
session = get_session(role, region)
client = session.client('logs')
paginator = client.get_paginator('describe_log_groups')
for p in paginator.paginate():
found = False
for _group in p['logGroups']:
if _group['logGroupName'] == group:
group = _group
found = True
break
if found:
break
if not found:
raise ValueError("Log group %s not found." % group)
if prefix:
prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/'))
else:
prefix = group['logGroupName']
named_group = "%s:%s" % (name, group['logGroupName'])
log.info(
"Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s",
named_group,
start.strftime('%Y/%m/%d'),
end.strftime('%Y/%m/%d'),
bucket,
prefix,
group['storedBytes'])
t = time.time()
days = [(
start + timedelta(i)).replace(minute=0, hour=0, second=0, microsecond=0)
for i in range((end - start).days)]
day_count = len(days)
s3 = boto3.Session().client('s3')
days = filter_extant_exports(s3, bucket, prefix, days, start, end)
log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s",
named_group, day_count, len(days),
days[0] if days else '', days[-1] if days else '')
t = time.time()
retry = get_retry(('SlowDown',))
for idx, d in enumerate(days):
date = d.replace(minute=0, microsecond=0, hour=0)
export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d"))
params = {
'taskName': "%s-%s" % ("c7n-log-exporter",
date.strftime("%Y-%m-%d")),
'logGroupName': group['logGroupName'],
'fromTime': int(time.mktime(
date.replace(
minute=0, microsecond=0, hour=0).timetuple()) * 1000),
'to': int(time.mktime(
date.replace(
minute=59, hour=23, microsecond=0).timetuple()) * 1000),
'destination': bucket,
'destinationPrefix': export_prefix
}
# if stream_prefix:
# params['logStreamPrefix'] = stream_prefix
try:
s3.head_object(Bucket=bucket, Key=prefix)
except ClientError as e:
if e.response['Error']['Code'] != '404': # Not Found
raise
s3.put_object(
Bucket=bucket,
Key=prefix,
Body=json.dumps({}),
ACL="bucket-owner-full-control",
ServerSideEncryption="AES256")
t = time.time()
counter = 0
while True:
counter += 1
try:
result = client.create_export_task(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'LimitExceededException':
time.sleep(poll_period)
# log every 30m of export waiting
if counter % 6 == 0:
log.debug(
"group:%s day:%s waiting for %0.2f minutes",
named_group, d.strftime('%Y-%m-%d'),
(counter * poll_period) / 60.0)
continue
raise
retry(
s3.put_object_tagging,
Bucket=bucket, Key=prefix,
Tagging={
'TagSet': [{
'Key': 'LastExport',
'Value': d.isoformat()}]})
break
log.info(
"Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s",
time.time() - t,
named_group,
d.strftime("%Y-%m-%d"),
bucket,
params['destinationPrefix'],
result['taskId'])
log.info(
("Exported log group:%s time:%0.2f days:%d start:%s"
" end:%s bucket:%s prefix:%s"),
named_group,
time.time() - t,
len(days),
start.strftime('%Y/%m/%d'),
end.strftime('%Y/%m/%d'),
bucket,
prefix) | 0.001054 |
def _embed_state(embedding, state):
"""Embed a single state/sample by spreading it's values over the chains in the embedding"""
return {u: state[v] for v, chain in embedding.items() for u in chain} | 0.009756 |
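A minimal illustration with a hypothetical two-variable embedding: every qubit in a chain receives the value of its source variable.
embedding = {'a': [0, 1], 'b': [2]}   # hypothetical chains
state = {'a': -1, 'b': +1}            # one sample over the source variables
# _embed_state(embedding, state) -> {0: -1, 1: -1, 2: 1}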
def residual_block(x, hparams):
"""A stack of convolution blocks with residual connection."""
k = (hparams.kernel_height, hparams.kernel_width)
dilations_and_kernels = [((1, 1), k) for _ in range(3)]
y = common_layers.subseparable_conv_block(
x,
hparams.hidden_size,
dilations_and_kernels,
padding="SAME",
separability=0,
name="residual_block")
x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
return tf.nn.dropout(x, 1.0 - hparams.dropout) | 0.013725 |
def _set_raw(target, raw_value, bitarray):
''' put value into bit array '''
offset = int(target['offset'])
size = int(target['size'])
for digit in range(size):
bitarray[offset+digit] = (raw_value >> (size-digit-1)) & 0x01 != 0
return bitarray | 0.006803 |
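To illustrate the MSB-first packing above with made-up numbers: writing raw_value 0b101 into a field of size 3 at offset 4 touches bits 4 through 6.
# digit 0: (0b101 >> 2) & 0x01 -> 1 -> bitarray[4] = True
# digit 1: (0b101 >> 1) & 0x01 -> 0 -> bitarray[5] = False
# digit 2: (0b101 >> 0) & 0x01 -> 1 -> bitarray[6] = True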
def summary(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, UserID=None):
""" 수집 결과 요약정보 조회
args
CorpNum : 팝빌회원 사업자번호
JobID : 작업아이디
Type : 문서형태 배열, N-일반전자세금계산서, M-수정전자세금계산서
TaxType : 과세형태 배열, T-과세, N-면세, Z-영세
PurposeType : 영수/청구, R-영수, C-청구, N-없음
TaxRegIDType : 종사업장번호 사업자유형, S-공급자, B-공급받는자, T-수탁자
TaxRegIDYN : 종사업장번호 유무, 공백-전체조회, 0-종사업장번호 없음, 1-종사업장번호 있음
TaxRegID : 종사업장번호, 콤마(",")로 구분 하여 구성 ex) '0001,0002'
UserID : 팝빌회원 아이디
return
수집 결과 요약정보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.")
uri = '/HomeTax/Taxinvoice/' + JobID + '/Summary'
uri += '?Type=' + ','.join(Type)
uri += '&TaxType=' + ','.join(TaxType)
uri += '&PurposeType=' + ','.join(PurposeType)
uri += '&TaxRegIDType=' + TaxRegIDType
uri += '&TaxRegID=' + TaxRegID
if TaxRegIDYN != '':
uri += '&TaxRegIDYN=' + TaxRegIDYN
return self._httpget(uri, CorpNum, UserID) | 0.00314 |
def setOutputObject(self, newOutput=output.CalcpkgOutput(True, True)):
"""Set an object where all output from calcpkg will be redirected to for this repository"""
self.output = newOutput | 0.026316 |
def authenticate_direct_credentials(self, username, password):
"""
Performs a direct bind, however using direct credentials. Can be used
if interfacing with an Active Directory domain controller which
        authenticates using [email protected] directly.
Performing this kind of lookup limits the information we can get from
ldap. Instead we can only deduce whether or not their bind was
successful. Do not use this method if you require more user info.
Args:
username (str): Username for the user to bind with.
LDAP_BIND_DIRECT_PREFIX will be prepended and
LDAP_BIND_DIRECT_SUFFIX will be appended.
password (str): User's password to bind with.
Returns:
AuthenticationResponse
"""
bind_user = '{}{}{}'.format(
self.config.get('LDAP_BIND_DIRECT_PREFIX'),
username,
self.config.get('LDAP_BIND_DIRECT_SUFFIX')
)
connection = self._make_connection(
bind_user=bind_user,
bind_password=password,
)
response = AuthenticationResponse()
try:
connection.bind()
response.status = AuthenticationResponseStatus.success
response.user_id = username
log.debug(
"Authentication was successful for user '{0}'".format(username))
if self.config.get('LDAP_BIND_DIRECT_GET_USER_INFO'):
# User wants extra info about the bind
user_filter = '({search_attr}={username})'.format(
search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'),
username=username
)
search_filter = '(&{0}{1})'.format(
self.config.get('LDAP_USER_OBJECT_FILTER'),
user_filter,
)
connection.search(
search_base=self.full_user_search_dn,
search_filter=search_filter,
search_scope=getattr(
ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')),
attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'),
)
if len(connection.response) == 0 or \
(self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and
len(connection.response) > 1):
# Don't allow them to log in.
log.error(
"Could not gather extra info for user '{0}'".format(username))
else:
user = connection.response[0]
user['attributes']['dn'] = user['dn']
response.user_info = user['attributes']
response.user_dn = user['dn']
except ldap3.core.exceptions.LDAPInvalidCredentialsResult:
log.debug(
"Authentication was not successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e:
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(connection)
return response | 0.001805 |
def build_args():
"""Create command line argument parser."""
parser = argparse.ArgumentParser(description=u'Compile a sensor graph.')
parser.add_argument(u'sensor_graph', type=str, help=u"the sensor graph file to load and run.")
parser.add_argument(u'-f', u'--format', default=u"nodes", choices=[u'nodes', u'ast', u'snippet', u'ascii', u'config', u'script'], type=str, help=u"the output format for the compiled result.")
parser.add_argument(u'-o', u'--output', type=str, help=u"the output file to save the results (defaults to stdout)")
parser.add_argument(u'--disable-optimizer', action="store_true", help=u"disable the sensor graph optimizer completely")
return parser | 0.007153 |
def register(self, plugin, name=None):
""" Register a plugin and return its canonical name or None if the name
is blocked from registering. Raise a ValueError if the plugin is already
registered. """
plugin_name = name or self.get_canonical_name(plugin)
if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
if self._name2plugin.get(plugin_name, -1) is None:
return # blocked plugin, return None to indicate no registration
raise ValueError(
"Plugin already registered: %s=%s\n%s"
% (plugin_name, plugin, self._name2plugin)
)
# XXX if an error happens we should make sure no state has been
# changed at point of return
self._name2plugin[plugin_name] = plugin
# register matching hook implementations of the plugin
self._plugin2hookcallers[plugin] = hookcallers = []
for name in dir(plugin):
hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
if hookimpl_opts is not None:
normalize_hookimpl_opts(hookimpl_opts)
method = getattr(plugin, name)
hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
hook = getattr(self.hook, name, None)
if hook is None:
hook = _HookCaller(name, self._hookexec)
setattr(self.hook, name, hook)
elif hook.has_spec():
self._verify_hook(hook, hookimpl)
hook._maybe_apply_history(hookimpl)
hook._add_hookimpl(hookimpl)
hookcallers.append(hook)
return plugin_name | 0.002875 |
def _visit(self, L, marked, tempmarked):
"""
Sort features topologically.
This recursive function uses depth-first search to find an ordering of
the features in the feature graph that is sorted both topologically and
with respect to genome coordinates.
Implementation based on Wikipedia's description of the algorithm in
Cormen's *Introduction to Algorithms*.
http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
There are potentially many valid topological sorts of a feature graph,
but only one that is also sorted with respect to genome coordinates
(excluding different orderings of, for example, exons and CDS features
with the same coordinates). Iterating through feature children in
        reversed order (in this function's inner-most loop) seems to be the key
to sorting with respect to genome coordinates.
"""
assert not self.is_pseudo
if self in tempmarked:
raise Exception('feature graph is cyclic')
if self not in marked:
tempmarked[self] = True
features = list()
if self.siblings is not None and self.is_toplevel:
features.extend(reversed(self.siblings))
if self.children is not None:
features.extend(reversed(self.children))
if len(features) > 0:
for feature in features:
feature._visit(L, marked, tempmarked)
marked[self] = True
del tempmarked[self]
L.insert(0, self) | 0.001246 |
def load_by_name(name):
"""
Load a spec from either a file path or a fully qualified name.
"""
if os.path.exists(name):
load_from_path(name)
else:
__import__(name) | 0.004975 |
def _dist_info_files(whl_zip):
"""Identify the .dist-info folder inside a wheel ZipFile."""
res = []
for path in whl_zip.namelist():
m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path)
if m:
res.append(path)
if res:
return res
raise Exception("No .dist-info folder found in wheel") | 0.002976 |
def set_fig_size(self, width, height=None):
"""Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
"""
self.figure.figure_width = width
self.figure.figure_height = height
return | 0.006186 |
def LikelihoodFunction(Template, Data, PSD, detRespP, detGCDelay=0):
""" LikelihoodFunction - function to calculate the likelihood of livePoint,
given Data.
Template - (N_fd) complex array containing Fourier domain trial signal.
Data - (N_fd) complex array containing Fourier domain GW data.
PSD - Noise power spectral density for a gravitational wave detector.
detRespP - Antenna response to the plus GW polarisation for the
detector.
detGCDelay - Time delay of detector from geocenter (default = 0, use
detGCDelay only if computing logL for more than one
detector.
Returns logL of Template.
Sarah Gossan 2012. Last updated 02/18/14. """
# Correct template for geocenter delay and antenna response function
if detGCDelay:
phaseGCDelay = -2.*np.pi*np.linspace(0,N_fd-1,num=N_fd)*dF*detGCDelay*1j
Template *= phaseGCDelay
Template *= detRespP
# Calculate logL - simple Gaussian
logL = -2.*dF*np.sum(pow(abs(Data[lowBin:] - Template[lowBin:]),2.)/\
PSD[lowBin:])
return logL | 0.020721 |
def get(self, sid):
"""
Constructs a StepContext
:param sid: Step Sid.
:returns: twilio.rest.studio.v1.flow.engagement.step.StepContext
:rtype: twilio.rest.studio.v1.flow.engagement.step.StepContext
"""
return StepContext(
self._version,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['engagement_sid'],
sid=sid,
) | 0.004464 |
def display_value(self, ctx, value):
""" Display value to be used for this parameter. """
gandi = ctx.obj
gandi.log('%s: %s' % (self.name, (value if value is not None
else 'Not found'))) | 0.007937 |
def viscosity_kinematic_chem(conc_chem, temp, en_chem):
"""Return the dynamic viscosity of water at a given temperature.
If given units, the function will automatically convert to Kelvin.
If not given units, the function will assume Kelvin.
"""
if en_chem == 0:
nu = viscosity_kinematic_alum(conc_chem, temp).magnitude
if en_chem == 1:
nu = viscosity_kinematic_pacl(conc_chem, temp).magnitude
if en_chem not in [0,1]:
nu = pc.viscosity_kinematic(temp).magnitude
return nu | 0.024164 |
def prepare_dispatches(cls, message, recipients=None):
"""Creates Dispatch models for a given message and return them.
:param Message message: Message model instance
        :param list|None recipients: A list of Recipient objects
:return: list of created Dispatch models
:rtype: list
"""
return Dispatch.create(message, recipients or cls.get_subscribers()) | 0.004926 |
def exons(context, build):
"""Load exons into the scout database"""
adapter = context.obj['adapter']
start = datetime.now()
# Test if there are any exons loaded
nr_exons = adapter.exons(build=build).count()
if nr_exons:
LOG.warning("Dropping all exons ")
adapter.drop_exons(build=build)
LOG.info("Exons dropped")
# Load the exons
ensembl_exons = fetch_ensembl_exons(build=build)
load_exons(adapter, ensembl_exons, build)
adapter.update_indexes()
LOG.info("Time to load exons: {0}".format(datetime.now() - start)) | 0.009901 |
def _vispy_emit_match_andor_record(self, record):
"""Log message emitter that optionally matches and/or records"""
test = record.getMessage()
match = self._vispy_match
if (match is None or re.search(match, test) or
re.search(match, _get_vispy_caller())):
if self._vispy_emit_record:
fmt_rec = self._vispy_formatter.format(record)
self._vispy_emit_list.append(fmt_rec)
if self._vispy_print_msg:
return logging.StreamHandler.emit(self, record)
else:
return | 0.003322 |
def choice_download(self):
"""Download script.tar.gz and sources
"""
Download(path="", url=self.dwn_srcs, repo="sbo").start()
raise SystemExit() | 0.011364 |
def get_data_file_names_from_scan_base(scan_base, filter_str=['_analyzed.h5', '_interpreted.h5', '_cut.h5', '_result.h5', '_hists.h5'], sort_by_time=True, meta_data_v2=True):
"""
Generate a list of .h5 files which have a similar file name.
Parameters
----------
scan_base : list, string
List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically.
    filter_str : list, string
List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter.
sort_by_time : bool
If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files.
meta_data_v2 : bool
True for new (v2) meta data format, False for the old (v1) format.
Returns
-------
data_files : list
        List of file names matching the above conditions.
"""
data_files = []
if scan_base is None:
return data_files
if isinstance(scan_base, basestring):
scan_base = [scan_base]
for scan_base_str in scan_base:
if '.h5' == os.path.splitext(scan_base_str)[1]:
data_files.append(scan_base_str)
else:
data_files.extend(glob.glob(scan_base_str + '*.h5'))
if filter_str:
if isinstance(filter_str, basestring):
filter_str = [filter_str]
data_files = filter(lambda data_file: not any([(True if x in data_file else False) for x in filter_str]), data_files)
if sort_by_time and len(data_files) > 1:
f_list = {}
for data_file in data_files:
with tb.open_file(data_file, mode="r") as h5_file:
try:
meta_data = h5_file.root.meta_data
except tb.NoSuchNodeError:
logging.warning("File %s is missing meta_data" % h5_file.filename)
else:
try:
if meta_data_v2:
timestamp = meta_data[0]["timestamp_start"]
else:
timestamp = meta_data[0]["timestamp"]
except IndexError:
logging.info("File %s has empty meta_data" % h5_file.filename)
else:
f_list[data_file] = timestamp
data_files = list(sorted(f_list, key=f_list.__getitem__, reverse=False))
return data_files | 0.00348 |
def check_dependencies(model, model_queue, avaliable_models):
""" Check that all the depenedencies for this model are already in the queue. """
# A list of allowed links: existing fields, itself and the special case ContentType
allowed_links = [m.model.__name__ for m in model_queue] + [model.__name__, 'ContentType']
# For each ForeignKey or ManyToMany field, check that a link is possible
for field in model._meta.fields:
if not field.remote_field:
continue
if field.remote_field.model.__name__ not in allowed_links:
if field.remote_field.model not in avaliable_models:
continue
return False
for field in model._meta.many_to_many:
if not field.remote_field:
continue
if field.remote_field.model.__name__ not in allowed_links:
return False
return True | 0.004484 |
def parameter_count(funcsig):
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
) | 0.002066 |
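A small usage sketch with a made-up function, assuming only the standard library's inspect module:
import inspect

def example(a, b, c=3, *args, d=None, **kwargs):
    pass

# Only a and b are positional-or-keyword parameters without a default,
# so parameter_count(inspect.signature(example)) evaluates to 2.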
def _generate_auth_url(self, path, params, accepts_clientid):
"""Returns the path and query string portion of the request URL, first
adding any necessary parameters.
:param path: The path portion of the URL.
:type path: string
:param params: URL parameters.
:type params: dict or list of key/value tuples
:rtype: string
"""
# Deterministic ordering through sorting by key.
# Useful for tests, and in the future, any caching.
extra_params = getattr(self, "_extra_params", None) or {}
if type(params) is dict:
params = sorted(dict(extra_params, **params).items())
else:
params = sorted(extra_params.items()) + params[:] # Take a copy.
if accepts_clientid and self.client_id and self.client_secret:
if self.channel:
params.append(("channel", self.channel))
params.append(("client", self.client_id))
path = "?".join([path, urlencode_params(params)])
sig = sign_hmac(self.client_secret, path)
return path + "&signature=" + sig
if self.key:
params.append(("key", self.key))
return path + "?" + urlencode_params(params)
raise ValueError("Must provide API key for this API. It does not accept "
"enterprise credentials.") | 0.002861 |
def websafe(s):
"""return a string with HTML-safe text"""
s=s.replace("<","<").replace(">",">")
s=s.replace(r'\x',r' \x')
s=s.replace("\n","<br>")
return s | 0.043011 |
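With the entity escaping shown above, a string containing markup and a newline comes back inert (an illustrative trace, not a doctest from the source):
# websafe('<b>hi</b>\nthere') -> '&lt;b&gt;hi&lt;/b&gt;<br>there'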
def wait_until_element_not_visible(webdriver, locator_lambda_expression,
timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):
"""
Wait for a WebElement to disappear.
Args:
webdriver (Webdriver) - Selenium Webdriver
locator (lambda) - Locator lambda expression.
Kwargs:
timeout (number) - timeout period
sleep (number) - sleep period between intervals.
"""
# Wait for loading progress indicator to go away.
try:
stoptime = datetime.now() + timedelta(seconds=timeout)
while datetime.now() < stoptime:
element = WebDriverWait(webdriver, WTF_TIMEOUT_MANAGER.BRIEF).until(
locator_lambda_expression)
if element.is_displayed():
time.sleep(sleep)
else:
break
except TimeoutException:
pass | 0.005133 |
def zoom_for_pixelsize(pixel_size, max_z=24, tilesize=256):
"""
Get mercator zoom level corresponding to a pixel resolution.
Freely adapted from
https://github.com/OSGeo/gdal/blob/b0dfc591929ebdbccd8a0557510c5efdb893b852/gdal/swig/python/scripts/gdal2tiles.py#L294
Parameters
----------
pixel_size: float
Pixel size
max_z: int, optional (default: 24)
Max mercator zoom level allowed
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Mercator zoom level corresponding to the pixel resolution
"""
for z in range(max_z):
if pixel_size > _meters_per_pixel(z, 0, tilesize=tilesize):
return max(0, z - 1) # We don't want to scale up
return max_z - 1 | 0.001285 |
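For orientation, assuming the helper implements the standard web-mercator ground resolution at the equator (about 156543 m/px at zoom 0 for 256-pixel tiles, halving with each level), a 10 m pixel size resolves to zoom 13:
# 156543.03 / 2**13 ≈ 19.11 m/px   -> 10 m is still finer, keep going
# 156543.03 / 2**14 ≈ 9.55 m/px    -> 10 > 9.55, loop stops and returns 14 - 1 = 13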
def patch_connection_file_paths(connection: str) -> str:
"""
Patch any paths in a connection to remove the balena host paths
Undoes the changes applied by
:py:meth:`opentrons.system.nmcli._rewrite_key_path_to_host_path`
:param connection: The contents of a NetworkManager connection file
    :return: The patched contents, suitable for writing somewhere
"""
new_conn_lines = []
for line in connection.split('\n'):
if '=' in line:
parts = line.split('=')
path_matches = re.search(
'/mnt/data/resin-data/[0-9]+/(.*)', parts[1])
if path_matches:
new_path = f'/data/{path_matches.group(1)}'
new_conn_lines.append(
'='.join([parts[0], new_path]))
LOG.info(
f"migrate_connection_file: {parts[0]}: "
f"{parts[1]}->{new_path}")
continue
new_conn_lines.append(line)
return '\n'.join(new_conn_lines) | 0.000982 |
def get_identifiers_splitted_by_weights(identifiers={}, proportions={}):
"""
    Divide the given identifiers based on the given proportions. But instead of
    splitting the identifiers randomly, the split is based on category weights.
    Every identifier has a weight for any number of categories. The goal is to
    split the identifiers so that the sum of category k within part x is
    proportional to the sum of category k over all parts, according to the given
    proportions. This is done by greedily inserting the identifiers step by step
    into a part which has free space (weight). If no part fits anymore, the one
    with the smallest excess weight is used.
Args:
identifiers (dict): A dictionary containing the weights for each identifier (key). Per
item a dictionary of weights per category is given.
proportions (dict): Dict of proportions, with a identifier as key.
Returns:
dict: Dictionary containing a list of identifiers per part with the same key as the proportions dict.
Example::
>>> identifiers = {
>>> 'a': {'music': 2, 'speech': 1},
>>> 'b': {'music': 5, 'speech': 2},
>>> 'c': {'music': 2, 'speech': 4},
>>> 'd': {'music': 1, 'speech': 4},
>>> 'e': {'music': 3, 'speech': 4}
>>> }
>>> proportions = {
>>> "train" : 0.6,
>>> "dev" : 0.2,
>>> "test" : 0.2
>>> }
>>> get_identifiers_splitted_by_weights(identifiers, proportions)
{
'train': ['a', 'b', 'd'],
'dev': ['c'],
'test': ['e']
}
"""
# Get total weight per category
sum_per_category = collections.defaultdict(int)
for identifier, cat_weights in identifiers.items():
for category, weight in cat_weights.items():
sum_per_category[category] += weight
target_weights_per_part = collections.defaultdict(dict)
# Get target weight for each part and category
for category, total_weight in sum_per_category.items():
abs_proportions = absolute_proportions(proportions, total_weight)
for idx, proportion in abs_proportions.items():
target_weights_per_part[idx][category] = proportion
# Distribute items greedily
part_ids = sorted(list(proportions.keys()))
current_weights_per_part = {idx: collections.defaultdict(int) for idx in part_ids}
result = collections.defaultdict(list)
for identifier in sorted(identifiers.keys()):
cat_weights = identifiers[identifier]
target_part = None
current_part = 0
weight_over_target = collections.defaultdict(int)
# Search for fitting part
while target_part is None and current_part < len(part_ids):
free_space = True
part_id = part_ids[current_part]
part_weights = current_weights_per_part[part_id]
for category, weight in cat_weights.items():
target_weight = target_weights_per_part[part_id][category]
current_weight = part_weights[category]
weight_diff = current_weight + weight - target_weight
weight_over_target[part_id] += weight_diff
if weight_diff > 0:
free_space = False
# If weight doesn't exceed target, place identifier in part
if free_space:
target_part = part_id
current_part += 1
# If not found fitting part, select the part with the least overweight
if target_part is None:
target_part = sorted(weight_over_target.items(), key=lambda x: x[1])[0][0]
result[target_part].append(identifier)
for category, weight in cat_weights.items():
current_weights_per_part[target_part][category] += weight
return result | 0.002815 |
def JoinPath(self, path_segments):
"""Joins the path segments into a path.
Args:
path_segments (list[str]): path segments.
Returns:
str: joined path segments prefixed with the path separator.
"""
# For paths on Windows we need to make sure to handle the first path
# segment correctly.
first_path_segment = None
if path_segments and platform.system() == 'Windows':
# Check if the first path segment contains a "special" path definition.
first_path_segment = path_segments[0]
first_path_segment_length = len(first_path_segment)
first_path_segment_prefix = None
# In case the path start with: \\.\C:\
if (first_path_segment_length >= 7 and
first_path_segment.startswith('\\\\.\\') and
first_path_segment[5:7] == ':\\'):
first_path_segment_prefix = first_path_segment[4:6]
first_path_segment = first_path_segment[7:]
# In case the path start with: \\.\ or \\?\
elif (first_path_segment_length >= 4 and
first_path_segment[:4] in ['\\\\.\\', '\\\\?\\']):
first_path_segment_prefix = first_path_segment[:4]
first_path_segment = first_path_segment[4:]
# In case the path start with: C:
elif first_path_segment_length >= 2 and first_path_segment[1] == ':':
first_path_segment_prefix = first_path_segment[:2]
first_path_segment = first_path_segment[2:]
# In case the path start with: \\server\share (UNC).
elif first_path_segment.startswith('\\\\'):
prefix, _, remainder = first_path_segment[2:].partition(
self.PATH_SEPARATOR)
first_path_segment_prefix = '\\\\{0:s}'.format(prefix)
first_path_segment = '\\{0:s}'.format(remainder)
if first_path_segment_prefix:
first_path_segment, _, remainder = first_path_segment.partition(
self.PATH_SEPARATOR)
if not remainder:
_ = path_segments.pop(0)
else:
path_segments[0] = remainder
first_path_segment = ''.join([
first_path_segment_prefix, first_path_segment])
else:
first_path_segment = None
# We are not using os.path.join() here since it will not remove all
# variations of successive path separators.
# Split all the path segments based on the path (segment) separator.
path_segments = [
segment.split(self.PATH_SEPARATOR) for segment in path_segments]
# Flatten the sublists into one list.
path_segments = [
element for sublist in path_segments for element in sublist]
# Remove empty path segments.
path_segments = list(filter(None, path_segments))
if first_path_segment is None:
path = '{0:s}{1:s}'.format(
self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))
else:
path = first_path_segment
if path_segments:
path = '{0:s}{1:s}{2:s}'.format(
path, self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))
return path | 0.00694 |
def write(self, addr, data):
'''Write access.
:param addr: i2c slave address
:type addr: char
:param data: array/list of bytes
:type data: iterable
:rtype: None
'''
self.set_addr(addr & 0xfe)
self.set_data(data)
self.set_size(len(data))
self.start()
while not self.is_ready:
pass | 0.00495 |
def check_address(address):
"""
Check if the format of the address is correct
Arguments:
address (tuple):
(``str``, ``int``) representing an IP address and port,
respectively
.. note::
alternatively a local ``address`` can be a ``str`` when working
with UNIX domain sockets, if supported by the platform
Raises:
ValueError:
raised when address has an incorrect format
Example:
>>> check_address(('127.0.0.1', 22))
"""
if isinstance(address, tuple):
check_host(address[0])
check_port(address[1])
elif isinstance(address, string_types):
if os.name != 'posix':
raise ValueError('Platform does not support UNIX domain sockets')
if not (os.path.exists(address) or
os.access(os.path.dirname(address), os.W_OK)):
            raise ValueError('ADDRESS not a valid UNIX domain socket ({0})'
                             .format(address))
else:
raise ValueError('ADDRESS is not a tuple, string, or character buffer '
'({0})'.format(type(address).__name__)) | 0.000845 |
def simulate_diffusion(self, save_pos=False, total_emission=True,
radial=False, rs=None, seed=1, path='./',
wrap_func=wrap_periodic,
chunksize=2**19, chunkslice='times', verbose=True):
"""Simulate Brownian motion trajectories and emission rates.
This method performs the Brownian motion simulation using the current
set of parameters. Before running this method you can check the
        disk-space requirements using :meth:`print_sizes`.
Results are stored to disk in HDF5 format and are accessible in
        `self.emission`, `self.emission_tot` and `self.position` as
pytables arrays.
Arguments:
save_pos (bool): if True, save the particles 3D trajectories
total_emission (bool): if True, store only the total emission array
containing the sum of emission of all the particles.
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
                random state, otherwise it is ignored.
wrap_func (function): the function used to apply the boundary
condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
path (string): a folder where simulation data is saved.
verbose (bool): if False, prints no output.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
self.open_store_traj(chunksize=chunksize, chunkslice=chunkslice,
radial=radial, path=path)
# Save current random state for reproducibility
self.traj_group._v_attrs['init_random_state'] = rs.get_state()
em_store = self.emission_tot if total_emission else self.emission
print('- Start trajectories simulation - %s' % ctime(), flush=True)
if verbose:
print('[PID %d] Diffusion time:' % os.getpid(), end='')
i_chunk = 0
t_chunk_size = self.emission.chunkshape[1]
chunk_duration = t_chunk_size * self.t_step
par_start_pos = self.particles.positions
prev_time = 0
for time_size in iter_chunksize(self.n_samples, t_chunk_size):
if verbose:
curr_time = int(chunk_duration * (i_chunk + 1))
if curr_time > prev_time:
print(' %ds' % curr_time, end='', flush=True)
prev_time = curr_time
POS, em = self._sim_trajectories(time_size, par_start_pos, rs,
total_emission=total_emission,
save_pos=save_pos, radial=radial,
wrap_func=wrap_func)
## Append em to the permanent storage
# if total_emission, data is just a linear array
# otherwise is a 2-D array (self.num_particles, c_size)
em_store.append(em)
if save_pos:
self.position.append(np.vstack(POS).astype('float32'))
i_chunk += 1
self.store.h5file.flush()
# Save current random state
self.traj_group._v_attrs['last_random_state'] = rs.get_state()
self.store.h5file.flush()
print('\n- End trajectories simulation - %s' % ctime(), flush=True) | 0.00172 |
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-10-01-preview: :mod:`v2015_10_01_preview.models<azure.mgmt.resource.policy.v2015_10_01_preview.models>`
* 2016-04-01: :mod:`v2016_04_01.models<azure.mgmt.resource.policy.v2016_04_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.resource.policy.v2016_12_01.models>`
* 2017-06-01-preview: :mod:`v2017_06_01_preview.models<azure.mgmt.resource.policy.v2017_06_01_preview.models>`
* 2018-03-01: :mod:`v2018_03_01.models<azure.mgmt.resource.policy.v2018_03_01.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.policy.v2018_05_01.models>`
"""
if api_version == '2015-10-01-preview':
from .v2015_10_01_preview import models
return models
elif api_version == '2016-04-01':
from .v2016_04_01 import models
return models
elif api_version == '2016-12-01':
from .v2016_12_01 import models
return models
elif api_version == '2017-06-01-preview':
from .v2017_06_01_preview import models
return models
elif api_version == '2018-03-01':
from .v2018_03_01 import models
return models
elif api_version == '2018-05-01':
from .v2018_05_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | 0.005867 |
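A hedged usage sketch for the version dispatch above, assuming the multi-API PolicyClient class that hosts this classmethod; the policy rule below is a made-up example.
from azure.mgmt.resource.policy import PolicyClient  # assumed host of the classmethod above

models = PolicyClient.models(api_version='2016-12-01')
# The returned module exposes that API version's model classes, e.g.:
definition = models.PolicyDefinition(
    display_name='Deny non-EU locations (example)',
    policy_rule={'if': {'field': 'location', 'notIn': ['westeurope', 'northeurope']},
                 'then': {'effect': 'deny'}})
print(type(definition).__module__)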
def set_chat_photo(
self,
chat_id: Union[int, str],
photo: str
) -> bool:
"""Use this method to set a new profile photo for the chat.
Photos can't be changed for private chats.
You must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note:
In regular groups (non-supergroups), this method will only work if the "All Members Are Admins"
setting is off.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
photo (``str``):
New chat photo. You can pass a :class:`Photo` id or a file path to upload a new photo.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` if a chat_id belongs to user.
"""
peer = self.resolve_peer(chat_id)
if os.path.exists(photo):
photo = types.InputChatUploadedPhoto(file=self.save_file(photo))
else:
s = unpack("<qq", b64decode(photo + "=" * (-len(photo) % 4), "-_"))
photo = types.InputChatPhoto(
id=types.InputPhoto(
id=s[0],
access_hash=s[1],
file_reference=b""
)
)
if isinstance(peer, types.InputPeerChat):
self.send(
functions.messages.EditChatPhoto(
chat_id=peer.chat_id,
photo=photo
)
)
elif isinstance(peer, types.InputPeerChannel):
self.send(
functions.channels.EditPhoto(
channel=peer,
photo=photo
)
)
else:
raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id))
return True | 0.003996 |
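A hedged usage sketch, assuming this method is bound to a Pyrogram-style Client; the session name, chat username and file path are placeholders, and valid API credentials must already be configured.
from pyrogram import Client

app = Client("my_account")  # placeholder session; api_id/api_hash must be configured

with app:
    # Upload a new photo from a local file path for a group the account administers.
    app.set_chat_photo(chat_id="@my_group", photo="new_photo.jpg")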
def replace_nulls(hdrs):
    """Replace empty strings in hdrs with unique placeholder header names."""
    ret = []
    for idx, hdr in enumerate(hdrs):
        if hdr == '':
            # Use the column position so every placeholder is unique.
            ret.append("no_hdr{}".format(idx))
        else:
            ret.append(hdr)
    return ret | 0.007634
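A quick illustration of replace_nulls with made-up headers:
hdrs = ['id', '', 'name', '']
print(replace_nulls(hdrs))
# ['id', 'no_hdr1', 'name', 'no_hdr3']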
def merge_commit(commit):
"Fetches the latest code and merges up the specified commit."
with cd(env.path):
run('git fetch')
if '@' in commit:
branch, commit = commit.split('@')
run('git checkout {0}'.format(branch))
run('git merge {0}'.format(commit)) | 0.003257 |
def update_build_properties(self, document, project, build_id):
"""UpdateBuildProperties.
[Preview API] Updates properties for a build.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.build.models.[JsonPatchOperation]>` document: A json-patch document describing the properties to update.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: :class:`<object> <azure.devops.v5_0.build.models.object>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='0a6312e9-0627-49b7-8083-7d74a64849c9',
version='5.0-preview.1',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
return self._deserialize('object', response) | 0.004743 |
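A hedged usage sketch for update_build_properties. The import paths follow the v5_0 modules named in the docstring; the organization URL, personal access token, project name and build id are placeholders.
from azure.devops.connection import Connection
from azure.devops.v5_0.build.models import JsonPatchOperation
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url='https://dev.azure.com/my-org',
    creds=BasicAuthentication('', 'personal_access_token'))
build_client = connection.clients.get_build_client()

# JSON-patch document that adds two custom properties to the build.
document = [
    JsonPatchOperation(op='add', path='/approved_by', value='jane.doe'),
    JsonPatchOperation(op='add', path='/signoff', value='true'),
]
props = build_client.update_build_properties(document, 'MyProject', build_id=1234)
print(props)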
def setup_figcanvas(self):
"""Setup the FigureCanvas."""
self.figcanvas = FigureCanvas(background_color=self.background_color)
self.figcanvas.installEventFilter(self)
self.setWidget(self.figcanvas) | 0.008734 |
def _AddToSingleColumnTable(self, tableName, columnHeading, newValue):
"""
Add an entry to a table containing a single column. Checks existing
table entries to avoid duplicate entries if the given value already
exists in the table.
Parameters
----------
tableName : string
Name of table to add entry to.
columnHeading : string
Name of column heading.
newValue : string
New value to add to table.
"""
match = None
currentTable = self._GetFromSingleColumnTable(tableName)
if currentTable is not None:
for currentValue in currentTable:
if currentValue == newValue:
match = True
if match is None:
goodlogging.Log.Info("DB", "Adding {0} to {1} table".format(newValue, tableName), verbosity=self.logVerbosity)
self._ActionDatabase("INSERT INTO {0} VALUES (?)".format(tableName), (newValue, ))
else:
goodlogging.Log.Info("DB", "{0} already exists in {1} table".format(newValue, tableName), verbosity=self.logVerbosity)
############################################################################
# _GetFromSingleColumnTable
############################################################################
"""
Get all entries from a table containing a single column.
Parameters
----------
tableName : string
      Name of table to get entries from.
Returns
----------
list or None
If either no table or no rows are found this returns None, otherwise a
list of all table entries is returned.
""" | 0.008217 |
def check_ups_input_frequency(the_session, the_helper, the_snmp_value):
"""
OID .1.3.6.1.2.1.33.1.3.3.1.2.1
MIB excerpt
The present input frequency.
"""
a_frequency = calc_frequency_from_snmpvalue(the_snmp_value)
the_helper.add_metric(
label=the_helper.options.type,
value=a_frequency,
uom='Hz')
the_helper.set_summary("Input Frequency is {} Hz".format(a_frequency)) | 0.002364 |
def on_heartbeat(self, message):
"""
Runs on a heartbeat event from websocket connection
Args:
            message (dict): Full message from Discord websocket connection
"""
logger.info("Got a heartbeat")
logger.info("Heartbeat message: {}".format(message))
self.heartbeat_thread.update_sequence(message['d'])
return | 0.005222 |
def execute():
""" Ensure provisioning """
boto_server_error_retries = 3
# Ensure provisioning
for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
try:
table_num_consec_read_checks = \
CHECK_STATUS['tables'][table_name]['reads']
except KeyError:
table_num_consec_read_checks = 0
try:
table_num_consec_write_checks = \
CHECK_STATUS['tables'][table_name]['writes']
except KeyError:
table_num_consec_write_checks = 0
try:
# The return var shows how many times the scale-down criteria
# has been met. This is coupled with a var in config,
# "num_intervals_scale_down", to delay the scale-down
table_num_consec_read_checks, table_num_consec_write_checks = \
table.ensure_provisioning(
table_name,
table_key,
table_num_consec_read_checks,
table_num_consec_write_checks)
CHECK_STATUS['tables'][table_name] = {
'reads': table_num_consec_read_checks,
'writes': table_num_consec_write_checks
}
gsi_names = set()
            # Add GSI names that match the configured regular expressions
for gst_instance in dynamodb.table_gsis(table_name):
gsi_name = gst_instance[u'IndexName']
try:
gsi_keys = get_table_option(table_key, 'gsis').keys()
except AttributeError:
                    # Continue if there are no GSIs configured
continue
for gsi_key in gsi_keys:
try:
if re.match(gsi_key, gsi_name):
logger.debug(
'Table {0} GSI {1} matches '
'GSI config key {2}'.format(
table_name, gsi_name, gsi_key))
gsi_names.add((gsi_name, gsi_key))
except re.error:
logger.error('Invalid regular expression: "{0}"'.format(
gsi_key))
sys.exit(1)
for gsi_name, gsi_key in sorted(gsi_names):
unique_gsi_name = ':'.join([table_name, gsi_name])
try:
gsi_num_consec_read_checks = \
CHECK_STATUS['gsis'][unique_gsi_name]['reads']
except KeyError:
gsi_num_consec_read_checks = 0
try:
gsi_num_consec_write_checks = \
CHECK_STATUS['gsis'][unique_gsi_name]['writes']
except KeyError:
gsi_num_consec_write_checks = 0
gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
gsi.ensure_provisioning(
table_name,
table_key,
gsi_name,
gsi_key,
gsi_num_consec_read_checks,
gsi_num_consec_write_checks)
CHECK_STATUS['gsis'][unique_gsi_name] = {
'reads': gsi_num_consec_read_checks,
'writes': gsi_num_consec_write_checks
}
except JSONResponseError as error:
exception = error.body['__type'].split('#')[1]
if exception == 'ResourceNotFoundException':
logger.error('{0} - Table {1} does not exist anymore'.format(
table_name,
table_name))
continue
except BotoServerError as error:
if boto_server_error_retries > 0:
logger.error(
'Unknown boto error. Status: "{0}". '
'Reason: "{1}". Message: {2}'.format(
error.status,
error.reason,
error.message))
logger.error(
'Please bug report if this error persists')
boto_server_error_retries -= 1
continue
else:
raise
# Sleep between the checks
if not get_global_option('run_once'):
logger.debug('Sleeping {0} seconds until next check'.format(
get_global_option('check_interval')))
time.sleep(get_global_option('check_interval')) | 0.00044 |
def _build_abbreviation_regex(input_string):
    """ builds a case-insensitive regex in which every character of the input string is
    optional, so abbreviated spellings of the input still match.
    e.g. 'punct' -> '([Pp]?[Uu]?[Nn]?[Cc]?[Tt]?)'
    :param input_string: str, input string
    :return: str, output regex
    """
    result = ''
    for char in input_string:
        result += '[%s%s]?' % (char.upper(), char.lower())
    return '(%s)' % result | 0.009132
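A quick check of the pattern _build_abbreviation_regex generates, runnable as-is given the function above:
import re

pattern = _build_abbreviation_regex('punct')
print(pattern)                                # ([Pp]?[Uu]?[Nn]?[Cc]?[Tt]?)
print(bool(re.fullmatch(pattern, 'PUNCT')))   # True
print(bool(re.fullmatch(pattern, 'pnct')))    # True -- characters may be skipped
print(bool(re.fullmatch(pattern, 'punk')))    # False -- 'k' is not in the pattern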
def prepare_env(self):
"""
Manages reading environment metadata files under ``private_data_dir`` and merging/updating
with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
"""
try:
passwords = self.loader.load_file('env/passwords', Mapping)
self.expect_passwords = {
re.compile(pattern, re.M): password
for pattern, password in iteritems(passwords)
}
except ConfigurationError:
output.debug('Not loading passwords')
self.expect_passwords = dict()
self.expect_passwords[pexpect.TIMEOUT] = None
self.expect_passwords[pexpect.EOF] = None
try:
# seed env with existing shell env
self.env = os.environ.copy()
envvars = self.loader.load_file('env/envvars', Mapping)
if envvars:
self.env.update({k:six.text_type(v) for k, v in envvars.items()})
if self.envvars and isinstance(self.envvars, dict):
self.env.update({k:six.text_type(v) for k, v in self.envvars.items()})
except ConfigurationError:
output.debug("Not loading environment vars")
# Still need to pass default environment to pexpect
self.env = os.environ.copy()
try:
self.settings = self.loader.load_file('env/settings', Mapping)
except ConfigurationError:
output.debug("Not loading settings")
self.settings = dict()
try:
self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
except ConfigurationError:
output.debug("Not loading ssh key")
self.ssh_key_data = None
self.idle_timeout = self.settings.get('idle_timeout', None)
self.job_timeout = self.settings.get('job_timeout', None)
self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))
if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
self.cwd = self.private_data_dir
else:
if self.directory_isolation_path is not None:
self.cwd = self.directory_isolation_path
else:
self.cwd = self.project_dir
        if 'fact_cache' in self.settings:
            # Both original branches resolved to the same jsonfile path, so only
            # skip it when a non-jsonfile fact_cache_type is explicitly configured.
            if self.settings.get('fact_cache_type', 'jsonfile') == 'jsonfile':
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache']) | 0.005427
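A hedged sketch of the on-disk layout prepare_env reads. The directory path and values are placeholders, and the YAML formatting of the env/ files is an assumption based on the Mapping and string types the loader expects.
import os

private_data_dir = '/tmp/runner_demo'   # placeholder location
env_dir = os.path.join(private_data_dir, 'env')
os.makedirs(env_dir, exist_ok=True)

# env/envvars -> merged on top of a copy of os.environ
with open(os.path.join(env_dir, 'envvars'), 'w') as f:
    f.write('---\nANSIBLE_STDOUT_CALLBACK: yaml\n')

# env/settings -> the timeouts and process-isolation switches read above
with open(os.path.join(env_dir, 'settings'), 'w') as f:
    f.write('---\nidle_timeout: 600\njob_timeout: 3600\npexpect_timeout: 10\n')

# env/passwords -> prompt pattern mapped to the password to type back
with open(os.path.join(env_dir, 'passwords'), 'w') as f:
    f.write('---\n"SSH password:": "example_password"\n')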
def remote_space_available(self, search_pattern=r"(\d+) bytes free"):
"""Return space available on remote device."""
remote_cmd = 'system "df {}"'.format(self.folder_name)
remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
for line in remote_output.splitlines():
if self.folder_name in line:
space_available = line.split()[-3]
break
return int(space_available) | 0.004357 |
def _precompute_euristics(self):
"""
        Precomputes the future symbols and the costs of the operations
        on them for the h-heuristic
"""
if self.euristics is None:
return
        # compute the minimal cost of an operation that makes
        # a given symbol appear ('+') or disappear ('-')
removal_costs = {a : np.inf for a in self.alphabet}
insertion_costs = {a : np.inf for a in self.alphabet}
if self.allow_spaces:
removal_costs[' '] = np.inf
insertion_costs[' '] = np.inf
for up, costs in self.transducer.operation_costs.items():
for low, cost in costs.items():
if up == low:
continue
if up != '':
removal_cost = cost / len(up)
for a in up:
removal_costs[a] = min(removal_costs[a], removal_cost)
if low != '':
insertion_cost = cost / len(low)
for a in low:
insertion_costs[a] = min(insertion_costs[a], insertion_cost)
        # precompute the possible future symbols in the tree nodes
# precompute_future_symbols(self.dictionary, self.euristics, self.allow_spaces)
        # precompute the costs of a missing symbol in the tree nodes
self._absense_costs_by_node = _precompute_absense_costs(
self.dictionary, removal_costs, insertion_costs,
self.euristics, self.allow_spaces)
        # array for storing the heuristics
self._temporary_euristics = [dict() for i in range(len(self.dictionary))] | 0.004258 |
def densu(alt, dlb, tinf, tlb, xm, alpha, tz, zlb, s2, mn1, zn1, tn1, tgn1):
'''
/* Calculate Temperature and Density Profiles for MSIS models
* New lower thermo polynomial
*/
tz, zn1, tn1, and tgn1 are simulated pointers
'''
rgas = 831.4
#rgas = 831.44621 #maybe make this a global constant?
densu_temp = 1.0
xs = [0.0]*5
ys = [0.0]*5
y2out = [0.0]*5
#/* joining altitudes of Bates and spline */
za=zn1[0];
if (alt>za):
z=alt;
else:
z=za;
#/* geopotential altitude difference from ZLB */
zg2 = zeta(z, zlb);
#/* Bates temperature */
tt = tinf - (tinf - tlb) * exp(-s2*zg2);
ta = tt;
tz[0] = tt
densu_temp = tz[0]
if (alt<za):
#/* calculate temperature below ZA
# * temperature gradient at ZA from Bates profile */
dta = (tinf - ta) * s2 * pow(((re[0]+zlb)/(re[0]+za)),2.0);
tgn1[0]=dta;
tn1[0]=ta;
if (alt>zn1[mn1-1]):
z=alt;
else:
z=zn1[mn1-1];
mn=mn1;
z1=zn1[0];
z2=zn1[mn-1];
t1=tn1[0];
t2=tn1[mn-1];
#/* geopotental difference from z1 */
zg = zeta (z, z1);
zgdif = zeta(z2, z1);
#/* set up spline nodes */
for k in range(mn):
xs[k] = zeta(zn1[k], z1) / zgdif;
ys[k] = 1.0 / tn1[k];
#/* end node derivatives */
yd1 = -tgn1[0] / (t1*t1) * zgdif;
yd2 = -tgn1[1] / (t2*t2) * zgdif * pow(((re[0]+z2)/(re[0]+z1)),2.0);
#/* calculate spline coefficients */
spline (xs, ys, mn, yd1, yd2, y2out);
x = zg / zgdif;
y = [0.0]
splint (xs, ys, y2out, mn, x, y);
#/* temperature at altitude */
tz[0] = 1.0 / y[0];
densu_temp = tz[0];
if (xm==0):
return densu_temp;
#/* calculate density above za */
glb = gsurf[0] / pow((1.0 + zlb/re[0]),2.0);
gamma = xm * glb / (s2 * rgas * tinf);
expl = exp(-s2 * gamma * zg2);
if (expl>50.0): # pragma: no cover
expl=50.0;
if (tt<=0): # pragma: no cover
expl=50.0;
#/* density at altitude */
densa = dlb * pow((tlb/tt),((1.0+alpha+gamma))) * expl;
densu_temp=densa;
if (alt>=za):
return densu_temp;
#/* calculate density below za */
glb = gsurf[0] / pow((1.0 + z1/re[0]),2.0);
gamm = xm * glb * zgdif / rgas;
#/* integrate spline temperatures */
yi = [0]
splini (xs, ys, y2out, mn, x, yi);
expl = gamm * yi[0];
if (expl>50.0): # pragma: no cover
expl=50.0;
if (tz[0]<=0): # pragma: no cover
expl=50.0;
#/* density at altitude */
densu_temp = densu_temp * pow ((t1 / tz[0]),(1.0 + alpha)) * exp(-expl);
return densu_temp; | 0.036891 |
def map(self, map_function):
"""Return a new Streamlet by applying map_function to each element of this Streamlet.
"""
from heronpy.streamlet.impl.mapbolt import MapStreamlet
map_streamlet = MapStreamlet(map_function, self)
self._add_child(map_streamlet)
return map_streamlet | 0.006689 |
def extractValue(self, model, item):
"""
Get the class name of the factory referenced by a port.
@param model: Either a TabularDataModel or a ScrollableView, depending
on what this column is part of.
@param item: A port item instance (as defined by L{xmantissa.port}).
@rtype: C{unicode}
@return: The name of the class of the item to which this column's
attribute refers.
"""
factory = super(FactoryColumn, self).extractValue(model, item)
return factory.__class__.__name__.decode('ascii') | 0.003454 |
def update_field(self, name, value):
"""Changes the definition of a KV Store field.
:param name: name of field to change
:type name: ``string``
:param value: new field definition
:type value: ``string``
:return: Result of POST request
"""
kwargs = {}
kwargs['field.' + name] = value
return self.post(**kwargs) | 0.005115 |
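A hedged usage sketch, assuming the splunklib client API around this KV Store collection method; connection parameters and names below are placeholders.
import splunklib.client as client

service = client.connect(
    host='localhost', port=8089, username='admin', password='changeme')

collection = service.kvstore['my_collection']   # existing KV Store collection
collection.update_field('amount', 'number')     # declare 'amount' as a numeric field
print(collection.name)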
def update_user_password(new_pwd_user_id, new_password,**kwargs):
"""
Update a user's password
"""
#check_perm(kwargs.get('user_id'), 'edit_user')
try:
user_i = db.DBSession.query(User).filter(User.id==new_pwd_user_id).one()
user_i.password = bcrypt.hashpw(str(new_password).encode('utf-8'), bcrypt.gensalt())
return user_i
except NoResultFound:
raise ResourceNotFoundError("User (id=%s) not found"%(new_pwd_user_id)) | 0.014675 |
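A self-contained sketch of the bcrypt hashing step used above; the user lookup and database session are omitted and the password is a placeholder.
import bcrypt

new_password = 'correct horse battery staple'
hashed = bcrypt.hashpw(new_password.encode('utf-8'), bcrypt.gensalt())

# A later login check compares the stored hash against the supplied password.
print(bcrypt.checkpw(new_password.encode('utf-8'), hashed))   # True
print(bcrypt.checkpw(b'wrong password', hashed))              # False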
def _ecc_static_length_signature(key, algorithm, digest):
"""Calculates an elliptic curve signature with a static length using pre-calculated hash.
:param key: Elliptic curve private key
:type key: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
:param algorithm: Master algorithm to use
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param bytes digest: Pre-calculated hash digest
:returns: Signature with required length
:rtype: bytes
"""
pre_hashed_algorithm = ec.ECDSA(Prehashed(algorithm.signing_hash_type()))
signature = b""
while len(signature) != algorithm.signature_len:
_LOGGER.debug(
"Signature length %d is not desired length %d. Recalculating.", len(signature), algorithm.signature_len
)
signature = key.sign(digest, pre_hashed_algorithm)
if len(signature) != algorithm.signature_len:
# Most of the time, a signature of the wrong length can be fixed
# by negating s in the signature relative to the group order.
_LOGGER.debug(
"Signature length %d is not desired length %d. Negating s.", len(signature), algorithm.signature_len
)
r, s = decode_dss_signature(signature)
s = _ECC_CURVE_PARAMETERS[algorithm.signing_algorithm_info.name].order - s
signature = encode_dss_signature(r, s)
return signature | 0.004155 |
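A standalone sketch of the s-negation trick used above, written directly against the cryptography package. The hard-coded P-256 group order stands in for the SDK's _ECC_CURVE_PARAMETERS table, and the message and curve choice are arbitrary.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
    decode_dss_signature, encode_dss_signature)

# Order of the NIST P-256 group.
P256_ORDER = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551

key = ec.generate_private_key(ec.SECP256R1(), default_backend())
signature = key.sign(b"example message", ec.ECDSA(hashes.SHA256()))

# Both (r, s) and (r, n - s) verify for the same message; picking the smaller s
# usually shortens the DER encoding by one byte, which is what the loop above exploits.
r, s = decode_dss_signature(signature)
low_s = min(s, P256_ORDER - s)
normalized = encode_dss_signature(r, low_s)

key.public_key().verify(normalized, b"example message", ec.ECDSA(hashes.SHA256()))
print(len(signature), len(normalized))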
def astype(self, dtype):
"""Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `TensorSpace`
Version of this space with given data type.
"""
if dtype is None:
            # Need to filter this out since Numpy interprets it as 'float'
raise ValueError('`None` is not a valid data type')
dtype = np.dtype(dtype)
if dtype == self.dtype:
return self
if is_numeric_dtype(self.dtype):
# Caching for real and complex versions (exact dtype mappings)
if dtype == self.__real_dtype:
if self.__real_space is None:
self.__real_space = self._astype(dtype)
return self.__real_space
elif dtype == self.__complex_dtype:
if self.__complex_space is None:
self.__complex_space = self._astype(dtype)
return self.__complex_space
else:
return self._astype(dtype)
else:
return self._astype(dtype) | 0.001431 |
def reload(self, reload_timeout=300, save_config=True):
"""Reload the device.
CSM_DUT#reload
System configuration has been modified. Save? [yes/no]: yes
Building configuration...
[OK]
Proceed with reload? [confirm]
"""
SAVE_CONFIG = re.compile(re.escape("System configuration has been modified. Save? [yes/no]: "))
PROCEED = re.compile(re.escape("Proceed with reload? [confirm]"))
IMAGE = re.compile("Passing control to the main image")
BOOTSTRAP = re.compile("System Bootstrap")
LOCATED = re.compile("Located .*")
RETURN = re.compile(re.escape("Press RETURN to get started!"))
response = "yes" if save_config else "no"
# 0 1 2 3 4
events = [SAVE_CONFIG, PROCEED, LOCATED, RETURN, self.username_re,
self.password_re, BOOTSTRAP, IMAGE, TIMEOUT, EOF]
# 5 6 7 8 9
transitions = [
(SAVE_CONFIG, [0], 1, partial(a_send_line, response), 60),
(PROCEED, [0, 1], 2, partial(a_send, "\r"), reload_timeout),
(LOCATED, [2], 2, a_message_callback, reload_timeout),
# if timeout try to send the reload command again
(TIMEOUT, [0], 0, partial(a_send_line, self.reload_cmd), 10),
(BOOTSTRAP, [2], -1, a_disconnect, reload_timeout),
(IMAGE, [2], 3, a_message_callback, reload_timeout),
(self.username_re, [3], -1, a_return_and_reconnect, 0),
(self.password_re, [3], -1, a_return_and_reconnect, 0),
(RETURN, [3], -1, a_return_and_reconnect, 0),
(TIMEOUT, [2], -1, a_disconnect, 0),
(EOF, [0, 1, 2, 3], -1, a_disconnect, 0)
]
fsm = FSM("IOS-RELOAD", self.device, events, transitions, timeout=10)
return fsm.run() | 0.001566 |
def create_config(cls, cfgfile, nick, twtfile, twturl, disclose_identity, add_news):
"""Create a new config file at the default location.
:param str cfgfile: path to the config file
:param str nick: nickname to use for own tweets
:param str twtfile: path to the local twtxt file
:param str twturl: URL to the remote twtxt file
:param bool disclose_identity: if true the users id will be disclosed
:param bool add_news: if true follow twtxt news feed
"""
cfgfile_dir = os.path.dirname(cfgfile)
if not os.path.exists(cfgfile_dir):
os.makedirs(cfgfile_dir)
cfg = configparser.ConfigParser()
cfg.add_section("twtxt")
cfg.set("twtxt", "nick", nick)
cfg.set("twtxt", "twtfile", twtfile)
cfg.set("twtxt", "twturl", twturl)
cfg.set("twtxt", "disclose_identity", str(disclose_identity))
cfg.set("twtxt", "character_limit", "140")
cfg.set("twtxt", "character_warning", "140")
cfg.add_section("following")
if add_news:
cfg.set("following", "twtxt", "https://buckket.org/twtxt_news.txt")
conf = cls(cfgfile, cfg)
conf.write_config()
return conf | 0.00241 |
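For reference, a self-contained sketch of the INI layout this method writes via configparser; the values are placeholders and only the section and key names come from the snippet.
import configparser
import io

cfg = configparser.ConfigParser()
cfg.add_section("twtxt")
cfg.set("twtxt", "nick", "buckket")
cfg.set("twtxt", "twtfile", "~/twtxt.txt")
cfg.set("twtxt", "twturl", "https://example.org/twtxt.txt")
cfg.set("twtxt", "disclose_identity", "False")
cfg.set("twtxt", "character_limit", "140")
cfg.set("twtxt", "character_warning", "140")
cfg.add_section("following")
cfg.set("following", "twtxt", "https://buckket.org/twtxt_news.txt")

buf = io.StringIO()
cfg.write(buf)
print(buf.getvalue())   # shows the [twtxt] and [following] sections as written to disk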
def move_file_to_directory(file_path, directory_path):
"""Moves file to given directory
:param file_path: path to file to move
:param directory_path: path to target directory where to move file
"""
file_name = os.path.basename(file_path) # get name of file
if not os.path.exists(directory_path):
os.makedirs(directory_path) # create directory if necessary
os.rename(file_path, os.path.join(directory_path,
file_name)) | 0.003774 |