def get_grid_qubits(arc: SquareGrid, nodes: Iterator[int]) -> List[cirq.GridQubit]:
"""Gets a list of :py:class:GridQubit` s corresponding to the qubit nodes provided on the given Architecture.
:param arc: The grid Architecture
:param nodes: An iterator of node index values
:return: The list of qubits
"""
return [cirq.GridQubit(*arc.qind_to_squind(i)) for i in nodes] | 0.007407 |
def expr(self):
"""
expr: term (('+' | '-') term)*
"""
node = self.term()
while self.token.nature in (Nature.PLUS, Nature.MINUS):
token = self.token
if token.nature == Nature.PLUS:
self._process(Nature.PLUS)
elif token.nature == Nature.MINUS:
self._process(Nature.MINUS)
else:
self._error()
node = BinaryOperation(left=node, op=token, right=self.term())
return node | 0.003817 |
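As a point of reference, the same left-associative folding pattern can be sketched standalone; the Token and BinaryOperation stand-ins below are hypothetical and not the parser's real classes (which rely on the Nature enum and self.term()).

from collections import namedtuple

Token = namedtuple('Token', 'nature value')                 # hypothetical stand-in
BinaryOperation = namedtuple('BinaryOperation', 'left op right')

def parse_expr(tokens):
    """Fold NUM (('+' | '-') NUM)* into a left-associative tree."""
    pos = 0

    def term():
        nonlocal pos
        tok = tokens[pos]
        pos += 1
        return tok.value

    node = term()
    while pos < len(tokens) and tokens[pos].nature in ('PLUS', 'MINUS'):
        op = tokens[pos]
        pos += 1
        node = BinaryOperation(left=node, op=op.nature, right=term())
    return node

# "1 + 2 - 3" parses as ((1 + 2) - 3)
print(parse_expr([Token('NUM', 1), Token('PLUS', '+'), Token('NUM', 2),
                  Token('MINUS', '-'), Token('NUM', 3)]))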
def wrap_exception(exception, cause):
"""
    Wraps another exception into the specified application exception object.
    If the original exception is already of ApplicationException type it is returned without changes.
    Otherwise the original error is set as the cause of the specified ApplicationException object.
:param exception: an ApplicationException object to wrap the cause
:param cause: an original error object
:return: an original or newly created ApplicationException
"""
if isinstance(cause, ApplicationException):
return cause
exception.with_cause(cause)
return exception | 0.007474 |
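A self-contained sketch of the same wrap-or-pass-through pattern; AppError below is a hypothetical stand-in for the library's ApplicationException, kept only to make the example runnable.

class AppError(Exception):                 # hypothetical stand-in
    def __init__(self, message):
        super().__init__(message)
        self.cause = None

    def with_cause(self, cause):
        self.cause = cause
        return self

def wrap(exception, cause):
    if isinstance(cause, AppError):        # already an application error: pass it through
        return cause
    exception.with_cause(cause)            # otherwise attach the original error as the cause
    return exception

err = wrap(AppError("config load failed"), ValueError("bad port"))
print(err, "caused by", repr(err.cause))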
def is_grammar_generating(grammar, remove=False):
# type: (Grammar, bool) -> bool
"""
    Check if the grammar is generating.
Generating grammar generates at least one sentence.
:param grammar: Grammar to check.
:param remove: True to remove nongenerating symbols from the grammar.
    :return: True if the grammar is generating, False otherwise.
"""
g = ContextFree.remove_nongenerating_nonterminals(grammar, remove)
return g.start is not None | 0.005871 |
def get_keys(self):
"""
Return the keys of the instruction
:rtype: a list of long
"""
return [(self.first_key + i) for i in range(0, len(self.targets))] | 0.010363 |
def state_to_xarray(state):
'''Convert a dictionary of climlab.Field objects to xarray.Dataset
Input: dictionary of climlab.Field objects
(e.g. process.state or process.diagnostics dictionary)
Output: xarray.Dataset object with all spatial axes,
including 'bounds' axes indicating cell boundaries in each spatial dimension.
Any items in the dictionary that are not instances of climlab.Field
are ignored.'''
from climlab.domain.field import Field
ds = Dataset()
for name, field in state.items():
if isinstance(field, Field):
ds[name] = Field_to_xarray(field)
dom = field.domain
for axname, ax in dom.axes.items():
bounds_name = axname + '_bounds'
ds.coords[bounds_name] = DataArray(ax.bounds, dims=[bounds_name],
coords={bounds_name:ax.bounds})
try:
ds[bounds_name].attrs['units'] = ax.units
except:
pass
else:
warnings.warn('{} excluded from Dataset because it is not a Field variable.'.format(name))
return ds | 0.006019 |
def relogin(self):
"""Perform a re-login"""
try:
self.__auth = None # reset auth before relogin
self.login()
except ZabbixAPIException as e:
self.log(ERROR, 'Zabbix API relogin error (%s)', e)
self.__auth = None # logged_in() will always return False
raise | 0.005848 |
def download(name, course, github='SheffieldML/notebook/master/lab_classes/'):
"""Download a lab class from the relevant course
:param course: the course short name to download the class from.
:type course: string
:param reference: reference to the course for downloading the class.
:type reference: string
:param github: github repo for downloading the course from.
:type string: github repo for downloading the lab."""
github_stub = 'https://raw.githubusercontent.com/'
if not name.endswith('.ipynb'):
name += '.ipynb'
from pods.util import download_url
download_url(os.path.join(github_stub, github, course, name), store_directory=course) | 0.002882 |
def rake(strike, dip, rake_angle):
"""
Calculates the longitude and latitude of the linear feature(s) specified by
`strike`, `dip`, and `rake_angle`.
Parameters
----------
strike : number or sequence of numbers
The strike of the plane(s) in degrees, with dip direction indicated by
the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number or sequence of numbers
The dip of the plane(s) in degrees.
rake_angle : number or sequence of numbers
The angle of the lineation on the plane measured in degrees downward
        from horizontal. Zero degrees corresponds to the "right-hand"
direction indicated by the strike, while 180 degrees or a negative
angle corresponds to the opposite direction.
Returns
-------
lon, lat : Arrays of longitude and latitude in radians.
"""
strike, dip, rake_angle = np.atleast_1d(strike, dip, rake_angle)
    # Plot the appropriate point for a strike of 0 and rotate it
dip = 90 - dip
lon = dip
rake_angle = rake_angle.copy()
rake_angle[rake_angle < 0] += 180
lat = 90 - rake_angle
lon, lat = _rotate(lon, lat, strike)
return lon, lat | 0.000812 |
def inFileHierarchy(self):
'''
Whether or not this node should be included in the file view hierarchy. Helper
method for :func:`~exhale.graph.ExhaleNode.toHierarchy`. Sets the member
variable ``self.in_file_hierarchy`` to True if appropriate.
:Return (bool):
True if this node should be included in the file view --- either it is a
            node of kind ``file``, or it is a ``dir`` and one or more of its
            descendants is a ``file``. Returns False otherwise.
'''
if self.kind == "file":
# flag that this file is already in the directory view so that potential
# missing files can be found later.
self.in_file_hierarchy = True
return True
elif self.kind == "dir":
for c in self.children:
if c.inFileHierarchy():
return True
return False | 0.006417 |
def _prepare_batch_request(self):
"""Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
"""
if len(self._requests) == 0:
raise ValueError("No deferred requests")
multi = MIMEMultipart()
for method, uri, headers, body in self._requests:
subrequest = MIMEApplicationHTTP(method, uri, headers, body)
multi.attach(subrequest)
# The `email` package expects to deal with "native" strings
if six.PY3: # pragma: NO COVER Python3
buf = io.StringIO()
else:
buf = io.BytesIO()
generator = Generator(buf, False, 0)
generator.flatten(multi)
payload = buf.getvalue()
# Strip off redundant header text
_, body = payload.split("\n\n", 1)
return dict(multi._headers), body | 0.001969 |
def design(self):
"""Returns the designed values.
        :returns: dict of the designed values (channel_n, channel_L, channel_W, baffle_S, obstacle_n, G, t, expansion_max_H, drain_ND)
        :rtype: dict
"""
floc_dict = {'channel_n': self.channel_n,
'channel_L': self.channel_L,
'channel_W': self.channel_W,
'baffle_S': self.baffle_S,
'obstacle_n': self.obstacle_n,
'G': self.vel_grad_avg,
't': self.retention_time,
'expansion_max_H': self.expansion_max_H,
'drain_ND': self.drain_ND}
return floc_dict | 0.003135 |
def uid(i):
"""
Input: {}
Output: {
Output from 'gen_uid' function
}
"""
o=i.get('out','')
r=gen_uid({})
if r['return']>0: return r
if o=='con':
out(r['data_uid'])
return r | 0.032258 |
def bounds(self, axis, view=None):
"""Get the bounds of the Visual
Parameters
----------
axis : int
The axis.
view : instance of VisualView
The view to use.
"""
if view is None:
view = self
if axis not in self._vshare.bounds:
self._vshare.bounds[axis] = self._compute_bounds(axis, view)
return self._vshare.bounds[axis] | 0.004545 |
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo) | 0.007194 |
def vn_release(call=None, kwargs=None):
'''
Releases a virtual network lease that was previously on hold.
.. versionadded:: 2016.3.0
vn_id
The ID of the virtual network from which to release the lease. Can be
used instead of ``vn_name``.
vn_name
The name of the virtual network from which to release the lease.
Can be used instead of ``vn_id``.
path
The path to a file defining the template of the lease to release.
Syntax within the file can be the usual attribute=value or XML. Can be
used instead of ``data``.
data
Contains the template defining the lease to release. Syntax can be the
usual attribute=value or XML. Can be used instead of ``path``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_release opennebula vn_id=3 path=/path/to/vn_release_file.txt
        salt-cloud -f vn_release opennebula vn_name=my-vn data="LEASES=[IP=192.168.0.5]"
'''
if call != 'function':
raise SaltCloudSystemExit(
            'The vn_release function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
vn_id = kwargs.get('vn_id', None)
vn_name = kwargs.get('vn_name', None)
path = kwargs.get('path', None)
data = kwargs.get('data', None)
if vn_id:
if vn_name:
log.warning(
'Both the \'vn_id\' and \'vn_name\' arguments were provided. '
'\'vn_id\' will take precedence.'
)
elif vn_name:
vn_id = get_vn_id(kwargs={'name': vn_name})
else:
raise SaltCloudSystemExit(
'The vn_release function requires a \'vn_id\' or a \'vn_name\' to '
'be provided.'
)
if data:
if path:
log.warning(
'Both the \'data\' and \'path\' arguments were provided. '
'\'data\' will take precedence.'
)
elif path:
with salt.utils.files.fopen(path, mode='r') as rfh:
data = rfh.read()
else:
raise SaltCloudSystemExit(
'The vn_release function requires either \'data\' or a \'path\' to '
'be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.vn.release(auth, int(vn_id), data)
ret = {
'action': 'vn.release',
'released': response[0],
'resource_id': response[1],
'error_code': response[2],
}
return ret | 0.001574 |
def notebook_for_kernel(self, kernel_id):
"""Return the notebook_id for a kernel_id or None."""
notebook_ids = [k for k, v in self._notebook_mapping.iteritems() if v == kernel_id]
if len(notebook_ids) == 1:
return notebook_ids[0]
else:
return None | 0.009901 |
def get_irradiance(self, surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth, dni, ghi, dhi,
dni_extra=None, airmass=None, model='haydavies',
**kwargs):
"""
Uses the :func:`irradiance.get_total_irradiance` function to
calculate the plane of array irradiance components on a tilted
surface defined by the input data and ``self.albedo``.
For a given set of solar zenith and azimuth angles, the
surface tilt and azimuth parameters are typically determined
by :py:meth:`~SingleAxisTracker.singleaxis`.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
ghi : float or Series
Global horizontal irradiance
dhi : float or Series
Diffuse horizontal irradiance
dni_extra : float or Series, default None
Extraterrestrial direct normal irradiance
airmass : float or Series, default None
Airmass
model : String, default 'haydavies'
Irradiance model.
**kwargs
Passed to :func:`irradiance.total_irrad`.
Returns
-------
poa_irradiance : DataFrame
Column names are: ``total, beam, sky, ground``.
"""
# not needed for all models, but this is easier
if dni_extra is None:
dni_extra = irradiance.get_extra_radiation(solar_zenith.index)
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
return irradiance.get_total_irradiance(surface_tilt,
surface_azimuth,
solar_zenith,
solar_azimuth,
dni, ghi, dhi,
dni_extra=dni_extra,
airmass=airmass,
model=model,
albedo=self.albedo,
**kwargs) | 0.002006 |
def set_disk1(self, disk1):
"""
Sets the size (MB) for PCMCIA disk1.
:param disk1: disk1 size (integer)
"""
yield from self._hypervisor.send('vm set_disk1 "{name}" {disk1}'.format(name=self._name, disk1=disk1))
log.info('Router "{name}" [{id}]: disk1 updated from {old_disk1}MB to {new_disk1}MB'.format(name=self._name,
id=self._id,
old_disk1=self._disk1,
new_disk1=disk1))
self._disk1 = disk1 | 0.009309 |
def parse_s2bins(s2bins):
"""
parse ggKbase scaffold-to-bin mapping
- scaffolds-to-bins and bins-to-scaffolds
"""
s2b = {}
b2s = {}
for line in s2bins:
line = line.strip().split()
s, b = line[0], line[1]
if 'UNK' in b:
continue
if len(line) > 2:
g = ' '.join(line[2:])
else:
g = 'n/a'
b = '%s\t%s' % (b, g)
s2b[s] = b
if b not in b2s:
b2s[b] = []
b2s[b].append(s)
return s2b, b2s | 0.005607 |
def show_lbaas_l7policy(self, l7policy, **_params):
"""Fetches information of a certain listener's L7 policy."""
return self.get(self.lbaas_l7policy_path % l7policy,
params=_params) | 0.00905 |
def _read24(self, register):
"""Read an unsigned 24-bit value as a floating point and return it."""
ret = 0.0
for b in self._read_register(register, 3):
ret *= 256.0
ret += float(b & 0xFF)
return ret | 0.007843 |
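A minimal sketch of the same big-endian accumulation on three bytes that are assumed to have already been read from the register (values made up; no I2C/SPI access here):

def combine24(raw_bytes):
    # accumulate most-significant byte first, exactly as _read24 does
    ret = 0.0
    for b in raw_bytes:
        ret *= 256.0
        ret += float(b & 0xFF)
    return ret

print(combine24([0x12, 0x34, 0x56]))   # 1193046.0 == float(0x123456)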
def matrix_index(user):
"""
Returns the keys associated with each axis of the matrices.
The first key is always the name of the current user, followed by the
sorted names of all the correspondants.
"""
other_keys = sorted([k for k in user.network.keys() if k != user.name])
return [user.name] + other_keys | 0.002985 |
def find_ei(data, nb=1000, save=False, save_folder='.', fmt='svg',
site_correction=False, return_new_dirs=False):
"""
    Applies a series of assumed flattening factors and "unsquishes" inclinations assuming a tangent function.
Finds flattening factor that gives elongation/inclination pair consistent with TK03;
or, if correcting by site instead of for study-level secular variation,
finds flattening factor that minimizes elongation and most resembles a
Fisherian distribution.
Finds bootstrap confidence bounds
Required Parameter
-----------
data: a nested list of dec/inc pairs
Optional Parameters (defaults are used unless specified)
-----------
nb: number of bootstrapped pseudo-samples (default is 1000)
save: Boolean argument to save plots (default is False)
save_folder: path to folder in which plots should be saved (default is current directory)
fmt: specify format of saved plots (default is 'svg')
site_correction: Boolean argument to specify whether to "unsquish" data to
1) the elongation/inclination pair consistent with TK03 secular variation model
(site_correction = False)
or
2) a Fisherian distribution (site_correction = True). Default is FALSE.
Note that many directions (~ 100) are needed for this correction to be reliable.
return_new_dirs: optional return of newly "unflattened" directions (default is False)
Returns
-----------
four plots: 1) equal area plot of original directions
2) Elongation/inclination pairs as a function of f, data plus 25 bootstrap samples
3) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
Estimate from original data set plotted as solid line
                4) Orientation of principal direction through unflattening
NOTE: If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot.
"""
print("Bootstrapping.... be patient")
print("")
sys.stdout.flush()
upper, lower = int(round(.975 * nb)), int(round(.025 * nb))
E, I = [], []
plt.figure(num=1, figsize=(4, 4))
plot_net(1)
plot_di(di_block=data)
plt.title('Original')
ppars = pmag.doprinc(data)
Io = ppars['inc']
n = ppars["N"]
Es, Is, Fs, V2s = pmag.find_f(data)
if site_correction == True:
Inc, Elong = Is[Es.index(min(Es))], Es[Es.index(min(Es))]
flat_f = Fs[Es.index(min(Es))]
else:
Inc, Elong = Is[-1], Es[-1]
flat_f = Fs[-1]
plt.figure(num=2, figsize=(4, 4))
plt.plot(Is, Es, 'r')
plt.xlabel("Inclination")
plt.ylabel("Elongation")
plt.text(Inc, Elong, ' %3.1f' % (flat_f))
plt.text(Is[0] - 2, Es[0], ' %s' % ('f=1'))
b = 0
while b < nb:
bdata = pmag.pseudo(data)
Esb, Isb, Fsb, V2sb = pmag.find_f(bdata)
if b < 25:
plt.plot(Isb, Esb, 'y')
if Esb[-1] != 0:
ppars = pmag.doprinc(bdata)
if site_correction == True:
I.append(abs(Isb[Esb.index(min(Esb))]))
E.append(Esb[Esb.index(min(Esb))])
else:
I.append(abs(Isb[-1]))
E.append(Esb[-1])
b += 1
I.sort()
E.sort()
Eexp = []
for i in I:
Eexp.append(pmag.EI(i))
plt.plot(I, Eexp, 'g-')
if Inc == 0:
title = 'Pathological Distribution: ' + \
'[%7.1f, %7.1f]' % (I[lower], I[upper])
else:
title = '%7.1f [%7.1f, %7.1f]' % (Inc, I[lower], I[upper])
cdf_fig_num = 3
plt.figure(num=cdf_fig_num, figsize=(4, 4))
pmagplotlib.plot_cdf(cdf_fig_num, I, 'Inclinations', 'r', title)
pmagplotlib.plot_vs(cdf_fig_num, [I[lower], I[upper]], 'b', '--')
pmagplotlib.plot_vs(cdf_fig_num, [Inc], 'g', '-')
pmagplotlib.plot_vs(cdf_fig_num, [Io], 'k', '-')
# plot corrected directional data
di_lists = unpack_di_block(data)
if len(di_lists) == 3:
decs, incs, intensity = di_lists
if len(di_lists) == 2:
decs, incs = di_lists
if flat_f:
unsquished_incs = unsquish(incs, flat_f)
plt.figure(num=4, figsize=(4, 4))
plot_net(4)
plot_di(decs, unsquished_incs)
plt.title('Corrected for flattening')
else:
plt.figure(num=4, figsize=(4, 4))
plot_net(4)
plot_di(decs, incs)
plt.title('Corrected for flattening')
if (Inc, Elong, flat_f) == (0, 0, 0):
print("PATHOLOGICAL DISTRIBUTION")
print("The original inclination was: " + str(Io))
print("")
print("The corrected inclination is: " + str(Inc))
print("with bootstrapped confidence bounds of: " +
str(I[lower]) + ' to ' + str(I[upper]))
print("and elongation parameter of: " + str(Elong))
print("The flattening factor is: " + str(flat_f))
if return_new_dirs is True:
return make_di_block(decs, unsquished_incs) | 0.002559 |
def _compute_vectors(self):
"""Compute the star's position as an ICRF position and velocity."""
# Use 1 gigaparsec for stars whose parallax is zero.
parallax = self.parallax_mas
if parallax <= 0.0:
parallax = 1.0e-6
# Convert right ascension, declination, and parallax to position
# vector in equatorial system with units of au.
dist = 1.0 / sin(parallax * 1.0e-3 * ASEC2RAD)
r = self.ra.radians
d = self.dec.radians
cra = cos(r)
sra = sin(r)
cdc = cos(d)
sdc = sin(d)
self._position_au = array((
dist * cdc * cra,
dist * cdc * sra,
dist * sdc,
))
# Compute Doppler factor, which accounts for change in light
# travel time to star.
k = 1.0 / (1.0 - self.radial_km_per_s / C * 1000.0)
# Convert proper motion and radial velocity to orthogonal
# components of motion with units of au/day.
pmr = self.ra_mas_per_year / (parallax * 365.25) * k
pmd = self.dec_mas_per_year / (parallax * 365.25) * k
rvl = self.radial_km_per_s * DAY_S / self.au_km * k
# Transform motion vector to equatorial system.
self._velocity_au_per_d = array((
- pmr * sra - pmd * sdc * cra + rvl * cdc * cra,
pmr * cra - pmd * sdc * sra + rvl * cdc * sra,
pmd * cdc + rvl * sdc,
)) | 0.002729 |
def clearScreen(cls):
"""Clear the screen"""
if "win32" in sys.platform:
os.system('cls')
elif "linux" in sys.platform:
os.system('clear')
elif 'darwin' in sys.platform:
os.system('clear')
else:
cit.err("No clearScreen for " + sys.platform) | 0.006098 |
def __create_checksum(self, p):
"""
Calculates the checksum of the packet to be sent to the time clock
Copied from zkemsdk.c
"""
l = len(p)
checksum = 0
while l > 1:
checksum += unpack('H', pack('BB', p[0], p[1]))[0]
p = p[2:]
if checksum > const.USHRT_MAX:
checksum -= const.USHRT_MAX
l -= 2
if l:
checksum = checksum + p[-1]
while checksum > const.USHRT_MAX:
checksum -= const.USHRT_MAX
checksum = ~checksum
while checksum < 0:
checksum += const.USHRT_MAX
return pack('H', checksum) | 0.005848 |
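A standalone sketch of the same 16-bit ones'-complement checksum over an arbitrary byte string; USHRT_MAX is assumed to be 65535, as in the const module the snippet imports.

from struct import pack, unpack

USHRT_MAX = 65535   # assumed value of const.USHRT_MAX

def checksum16(payload):
    p, l, total = bytearray(payload), len(payload), 0
    while l > 1:
        total += unpack('H', pack('BB', p[0], p[1]))[0]   # native-order 16-bit word
        p = p[2:]
        if total > USHRT_MAX:
            total -= USHRT_MAX
        l -= 2
    if l:                         # odd trailing byte
        total += p[-1]
    while total > USHRT_MAX:
        total -= USHRT_MAX
    total = ~total
    while total < 0:
        total += USHRT_MAX
    return pack('H', total)

print(checksum16(b'\x01\x02\x03').hex())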
def find_atoms_within_distance(atoms, cutoff_distance, point):
"""Returns atoms within the distance from the point.
Parameters
----------
atoms : [ampal.atom]
A list of `ampal.atoms`.
cutoff_distance : float
Maximum distance from point.
point : (float, float, float)
Reference point, 3D coordinate.
Returns
-------
filtered_atoms : [ampal.atoms]
`atoms` list filtered by distance.
"""
return [x for x in atoms if distance(x, point) <= cutoff_distance] | 0.00189 |
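The same filter expressed on plain 3D coordinates, with a hypothetical Euclidean helper standing in for ampal's distance():

import math

def euclid(a, b):                       # hypothetical stand-in for ampal's distance()
    return math.sqrt(sum((ai - bi) ** 2 for ai, bi in zip(a, b)))

def within_distance(points, cutoff_distance, point):
    return [x for x in points if euclid(x, point) <= cutoff_distance]

pts = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (5.0, 5.0, 5.0)]
print(within_distance(pts, cutoff_distance=2.0, point=(0.0, 0.0, 0.0)))
# -> [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]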
def get_prediction_score(self, node_id):
"""
Return the prediction score (if leaf node) or None if its an
intermediate node.
Parameters
----------
node_id: id of the node to get the prediction value.
Returns
-------
float or None: returns float value of prediction if leaf node and None
if not.
Examples
--------
.. sourcecode:: python
>>> tree.get_prediction_score(120) # Leaf node
0.251092
>>> tree.get_prediction_score(120) # Not a leaf node
None
"""
_raise_error_if_not_of_type(node_id, [int,long], "node_id")
_numeric_param_check_range("node_id", node_id, 0, self.num_nodes - 1)
node = self.nodes[node_id]
return None if node.is_leaf is False else node.value | 0.003476 |
def tlog(x, th=1, r=_display_max, d=_l_mmax):
"""
Truncated log10 transform.
Parameters
----------
x : num | num iterable
values to be transformed.
th : num
        values at or below th are transformed to log10(th) * r / d (0 when th == 1).
        Must be positive.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
tlog(10**d) = r
Returns
-------
Array of transformed values.
"""
if th <= 0:
raise ValueError('Threshold value must be positive. %s given.' % th)
return where(x <= th, log10(th) * 1. * r / d, log10(x) * 1. * r / d) | 0.001497 |
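A runnable NumPy sketch of the same transform with the default constants written out (r = 10**4 and d = log10(2**18) are assumed from the docstring; the original pulls them from module-level defaults):

import numpy as np

R = 10 ** 4                  # assumed default of _display_max
D = np.log10(2 ** 18)        # assumed default of _l_mmax

def tlog_sketch(x, th=1, r=R, d=D):
    x = np.asarray(x, dtype=float)
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    return np.where(x <= th, np.log10(th) * 1. * r / d, np.log10(x) * 1. * r / d)

print(tlog_sketch([0.5, 1, 10, 2 ** 18]))   # the last value maps to r == 10000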
def train_batch(self, batch_info: BatchInfo) -> None:
"""
Batch - the most atomic unit of learning.
        For this reinforcer, that involves:
        1. Roll out the environment using the current policy
2. Use that rollout to train the policy
"""
# Calculate environment rollout on the evaluation version of the model
self.model.train()
rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps)
# Process rollout by the 'algo' (e.g. perform the advantage estimation)
rollout = self.algo.process_rollout(batch_info, rollout)
# Perform the training step
# Algo will aggregate data into this list:
batch_info['sub_batch_data'] = []
if self.settings.shuffle_transitions:
rollout = rollout.to_transitions()
if self.settings.stochastic_experience_replay:
# Always play experience at least once
experience_replay_count = max(np.random.poisson(self.settings.experience_replay), 1)
else:
experience_replay_count = self.settings.experience_replay
# Repeat the experience N times
for i in range(experience_replay_count):
# We may potentially need to split rollout into multiple batches
if self.settings.batch_size >= rollout.frames():
batch_result = self.algo.optimizer_step(
batch_info=batch_info,
device=self.device,
model=self.model,
rollout=rollout.to_device(self.device)
)
batch_info['sub_batch_data'].append(batch_result)
else:
# Rollout too big, need to split in batches
for batch_rollout in rollout.shuffled_batches(self.settings.batch_size):
batch_result = self.algo.optimizer_step(
batch_info=batch_info,
device=self.device,
model=self.model,
rollout=batch_rollout.to_device(self.device)
)
batch_info['sub_batch_data'].append(batch_result)
batch_info['frames'] = rollout.frames()
batch_info['episode_infos'] = rollout.episode_information()
# Even with all the experience replay, we count the single rollout as a single batch
batch_info.aggregate_key('sub_batch_data') | 0.00242 |
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' ') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {} | 0.040765 |
def arbiter_priority(req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None):
""" Static priority arbiter: grants the request with highest priority, which is the lower index
req_vec - (i) vector of request signals, req_vec[0] is with the highest priority
gnt_vec - (o) optional, vector of grants, one grant per request, only one grant can be active at at time
gnt_idx - (o) optional, grant index, index of the granted request
gnt_vld - (o) optional, grant valid, indicate that there is a granted request
"""
REQ_NUM = len(req_vec)
gnt_vec_s = Signal(intbv(0)[REQ_NUM:])
gnt_idx_s = Signal(intbv(0, min=0, max=REQ_NUM))
gnt_vld_s = Signal(bool(0))
@always_comb
    def priority_encoder():
gnt_vec_s.next = 0
gnt_idx_s.next = 0
gnt_vld_s.next = 0
for i in range(REQ_NUM):
if ( req_vec[i]==1 ):
gnt_vec_s.next[i] = 1
gnt_idx_s.next = i
gnt_vld_s.next = 1
break
if gnt_vec!=None: _vec = assign(gnt_vec, gnt_vec_s)
if gnt_idx!=None: _idx = assign(gnt_idx, gnt_idx_s)
if gnt_vld!=None: _vld = assign(gnt_vld, gnt_vld_s)
return instances() | 0.013821 |
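For checking the block in simulation, the fixed-priority policy itself (lowest index wins) can be modelled in plain Python; this is only an illustrative software model, not generated HDL:

def priority_arbiter_model(req_vec):
    """Lowest set index wins; returns (gnt_vec, gnt_idx, gnt_vld)."""
    for i, req in enumerate(req_vec):
        if req:
            return [int(j == i) for j in range(len(req_vec))], i, 1
    return [0] * len(req_vec), 0, 0

print(priority_arbiter_model([0, 1, 1, 0]))   # -> ([0, 1, 0, 0], 1, 1)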
def remove(self, contact_id, session):
        '''taobao.logistics.address.remove: delete a seller address-book entry.
        Use this API to remove an address from the seller's address book.'''
request = TOPRequest('taobao.logistics.address.remove')
request['contact_id'] = contact_id
self.create(self.execute(request, session))
return self.address_result | 0.009524 |
def set_pointer0d(subseqs):
"""Set_pointer function for 0-dimensional link sequences."""
print(' . set_pointer0d')
lines = Lines()
lines.add(1, 'cpdef inline set_pointer0d'
'(self, str name, pointerutils.PDouble value):')
for seq in subseqs:
lines.add(2, 'if name == "%s":' % seq.name)
lines.add(3, 'self.%s = value.p_value' % seq.name)
return lines | 0.004415 |
def AdjustDescriptor(self, fields):
"""Payload-aware metadata processor."""
for f in fields:
if f.name == "args_rdf_name":
f.name = "payload_type"
if f.name == "args":
f.name = "payload"
return fields | 0.012346 |
def delete_object_async(self, path, **kwds):
"""DELETE an object.
Note: No payload argument is supported.
"""
return self.do_request_async(self.api_url + path, 'DELETE', **kwds) | 0.005155 |
def description(self):
"""This read-only attribute is a sequence of 7-item sequences.
Each of these sequences contains information describing one result column:
- name
- type_code
- display_size (None in current implementation)
- internal_size (None in current implementation)
- precision (None in current implementation)
- scale (None in current implementation)
- null_ok (always True in current implementation)
The ``type_code`` can be interpreted by comparing it to the Type Objects specified in the
section below.
"""
        # Return an empty description if the columns have not been populated yet
if self._columns is None:
return []
return [
# name, type_code, display_size, internal_size, precision, scale, null_ok
(col[0], col[1], None, None, None, None, True) for col in self._columns
] | 0.006466 |
def define_request(
dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
if query:
request["CQL_FILTER"] = query
if bounds:
request["bbox"] = ",".join([str(b) for b in bounds])
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts | 0.00112 |
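For example, with a hypothetical count of 12,345 features and the default pagesize of 10,000, the chunking above produces two requests, the second offset by startIndex; the arithmetic in isolation:

import math

n, pagesize = 12345, 10000        # hypothetical count; the real value comes from bcdata.get_count()
chunks = math.ceil(n / pagesize)  # -> 2
param_dicts = []
for i in range(chunks):
    request = {"request": "GetFeature", "typeName": "example-table"}
    if chunks > 1:
        request["startIndex"] = i * pagesize
        request["count"] = pagesize
    param_dicts.append(request)
print(param_dicts)
# [{'request': 'GetFeature', 'typeName': 'example-table', 'startIndex': 0, 'count': 10000},
#  {'request': 'GetFeature', 'typeName': 'example-table', 'startIndex': 10000, 'count': 10000}]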
def save(self):
"""
Saves changes made to the locally cached DesignDocument object's data
structures to the remote database. If the design document does not
exist remotely then it is created in the remote database. If the object
does exist remotely then the design document is updated remotely. In
either case the locally cached DesignDocument object is also updated
accordingly based on the successful response of the operation.
"""
if self.views:
if self.get('language', None) != QUERY_LANGUAGE:
for view_name, view in self.iterviews():
if isinstance(view, QueryIndexView):
raise CloudantDesignDocumentException(104, view_name)
else:
for view_name, view in self.iterviews():
if not isinstance(view, QueryIndexView):
raise CloudantDesignDocumentException(105, view_name)
if self.indexes:
if self.get('language', None) != QUERY_LANGUAGE:
for index_name, search in self.iterindexes():
# Check the instance of the javascript search function
if not isinstance(search['index'], STRTYPE):
raise CloudantDesignDocumentException(106, index_name)
else:
for index_name, index in self.iterindexes():
if not isinstance(index['index'], dict):
raise CloudantDesignDocumentException(107, index_name)
for prop in self._nested_object_names:
if not getattr(self, prop):
# Ensure empty dict for each sub-object is not saved remotely.
self.__delitem__(prop)
super(DesignDocument, self).save()
for prop in self._nested_object_names:
# Ensure views, indexes, and lists dict exist in locally cached DesignDocument.
getattr(self, prop, self.setdefault(prop, dict())) | 0.00197 |
def materialize_entity(ctx, etype, unique=None):
'''
Low-level routine for creating a BIBFRAME resource. Takes the entity (resource) type and a data mapping
according to the resource type. Implements the Libhub Resource Hash Convention
As a convenience, if a vocabulary base is provided in the context, concatenate it to etype and the data keys
ctx - context information governing creation of the new entity
    etype - type IRI for the new entity
unique - list of key/value tuples of data to use in generating its unique ID, or None in which case one is just randomly generated
'''
params = {}
if ctx.base:
etype = ctx.base + etype
unique_computed = []
    for k, v in (unique or []):  # tolerate unique=None, as documented above
k = k if iri.is_absolute(k) else iri.absolutize(k, ctx.base)
v = v(ctx) if callable(v) else v
unique_computed.append((k, v))
if unique_computed:
plaintext = json.dumps([etype, unique_computed], cls=OrderedJsonEncoder)
eid = ctx.idgen.send(plaintext)
else:
#We only have a type; no other distinguishing data. Generate a random hash
eid = next(ctx.idgen)
return eid | 0.006897 |
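The hashing convention the docstring mentions (a stable ID derived from the entity type plus its distinguishing key/value pairs) can be sketched with hashlib; this is only an analogy, not the library's actual idgen coroutine:

import hashlib
import json

def hash_entity_id(etype, unique_data):
    # stable ID: serialize type + sorted distinguishing data, then hash
    plaintext = json.dumps([etype, sorted(unique_data)], separators=(',', ':'))
    return hashlib.sha1(plaintext.encode('utf-8')).hexdigest()[:16]

print(hash_entity_id('http://example.org/vocab/Work',
                     [('title', 'Moby Dick'), ('creator', 'Melville')]))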
def to_pb(self):
"""Render a protobuf message.
Returns:
google.iam.policy_pb2.Policy: a message to be passed to the
``set_iam_policy`` gRPC API.
"""
return policy_pb2.Policy(
etag=self.etag,
version=self.version or 0,
bindings=[
policy_pb2.Binding(role=role, members=sorted(self[role]))
for role in self
],
) | 0.004405 |
def reload(self, reload_timeout=300, save_config=True):
"""Reload the device.
CSM_DUT#reload
System configuration has been modified. Save? [yes/no]: yes
Building configuration...
[OK]
Proceed with reload? [confirm]
"""
response = "yes" if save_config else "no"
events = [SAVE_CONFIG, PROCEED, pexpect.TIMEOUT, pexpect.EOF]
transitions = [
(SAVE_CONFIG, [0], 1, partial(a_send_line, response), 60),
(PROCEED, [0, 1], 2, partial(a_send, "\r"), reload_timeout),
# if timeout try to send the reload command again
(pexpect.TIMEOUT, [0], 0, partial(a_send_line, self.reload_cmd), 10),
(pexpect.TIMEOUT, [2], -1, a_reconnect, 0),
(pexpect.EOF, [0, 1, 2], -1, a_disconnect, 0)
]
fsm = FSM("IOS-RELOAD", self.device, events, transitions, timeout=10, max_transitions=5)
return fsm.run() | 0.004184 |
def xml(self, indent = ""):
"""Produce Template XML"""
xml = indent + "<OutputTemplate id=\"" + self.id + "\" format=\"" + self.formatclass.__name__ + "\"" + " label=\"" + self.label + "\""
if self.formatclass.mimetype:
xml +=" mimetype=\""+self.formatclass.mimetype+"\""
if self.formatclass.schema:
xml +=" schema=\""+clam.common.util.xmlescape(self.formatclass.schema)+"\""
if self.filename:
xml +=" filename=\""+clam.common.util.xmlescape(self.filename)+"\""
if self.extension:
xml +=" extension=\""+clam.common.util.xmlescape(self.extension)+"\""
if self.parent:
xml +=" parent=\""+clam.common.util.xmlescape(self.parent)+"\""
if self.unique:
xml +=" unique=\"yes\""
else:
xml +=" unique=\"no\""
xml += ">\n"
for metafield in self.metafields:
xml += metafield.xml(indent + " ") + "\n"
xml += indent + "</OutputTemplate>"
return xml | 0.013474 |
def cloudwatch_connection(self):
""" Lazy create a connection to cloudwatch """
if self._cloudwatch_connection is None:
conn = self._session.create_client("cloudwatch", self.connection.region)
self._cloudwatch_connection = conn
return self._cloudwatch_connection | 0.009677 |
def _makedirs(self, path):
"""Make folders recursively for the given path and
check read and write permission on the path
Args:
path -- path to the leaf folder
"""
try:
oldmask = os.umask(0)
os.makedirs(path, self._conf['dmode'])
os.umask(oldmask)
except OSError as e:
if(e.errno == errno.EACCES):
                raise Exception('not sufficient permissions to write on fsdb folder: "{0}"'.format(path))
elif(e.errno == errno.EEXIST):
fstat = os.stat(path)
if not stat.S_ISDIR(fstat.st_mode):
raise Exception('fsdb folder already exists but it is not a regular folder: "{0}"'.format(path))
elif not os.access(path, os.R_OK and os.W_OK):
                    raise Exception('not sufficient permissions to write on fsdb folder: "{0}"'.format(path))
else:
raise e | 0.005107 |
def markLoadingStarted(self):
"""
Marks this widget as loading records.
"""
if self.isThreadEnabled():
XLoaderWidget.start(self)
if self.showTreePopup():
tree = self.treePopupWidget()
tree.setCursor(Qt.WaitCursor)
tree.clear()
tree.setUpdatesEnabled(False)
tree.blockSignals(True)
self._baseHints = (self.hint(), tree.hint())
tree.setHint('Loading records...')
self.setHint('Loading records...')
else:
self._baseHints = (self.hint(), '')
self.setHint('Loading records...')
self.setCursor(Qt.WaitCursor)
self.blockSignals(True)
self.setUpdatesEnabled(False)
# prepare to load
self.clear()
use_dummy = not self.isRequired() or self.isCheckable()
if use_dummy:
self.addItem('')
self.loadingStarted.emit() | 0.00675 |
def _list_function_infos(jvm):
"""
Returns a list of function information via JVM. Sorts wrapped expression infos by name
and returns them.
"""
jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos()
infos = []
for jinfo in jinfos:
name = jinfo.getName()
usage = jinfo.getUsage()
usage = usage.replace("_FUNC_", name) if usage is not None else usage
infos.append(ExpressionInfo(
className=jinfo.getClassName(),
name=name,
usage=usage,
arguments=jinfo.getArguments().replace("_FUNC_", name),
examples=jinfo.getExamples().replace("_FUNC_", name),
note=jinfo.getNote(),
since=jinfo.getSince(),
deprecated=jinfo.getDeprecated()))
return sorted(infos, key=lambda i: i.name) | 0.003488 |
def _import_next_layer(self, proto, length):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
"""
if self._exproto == 'null' and self._exlayer == 'None':
from pcapkit.protocols.raw import Raw as NextLayer
else:
from pcapkit.foundation.analysis import analyse as NextLayer
# from pcapkit.foundation.analysis import analyse as NextLayer
if length == 0:
next_ = NoPayload()
elif self._onerror:
next_ = beholder_ng(NextLayer)(self._file, length, _termination=self._sigterm)
else:
next_ = NextLayer(self._file, length, _termination=self._sigterm)
return next_ | 0.002907 |
def _StackSummary_extract(frame_gen, limit=None, lookup_lines=True, capture_locals=False):
"""
Replacement for :func:`StackSummary.extract`.
Create a StackSummary from a traceback or stack object.
Very simplified copy of the original StackSummary.extract().
We want always to capture locals, that is why we overwrite it.
Additionally, we also capture the frame.
This is a bit hacky and also not like this is originally intended (to not keep refs).
:param frame_gen: A generator that yields (frame, lineno) tuples to
include in the stack.
:param limit: None to include all frames or the number of frames to
include.
:param lookup_lines: If True, lookup lines for each frame immediately,
otherwise lookup is deferred until the frame is rendered.
:param capture_locals: If True, the local variables from each frame will
be captured as object representations into the FrameSummary.
"""
result = StackSummary()
for f, lineno in frame_gen:
co = f.f_code
filename = co.co_filename
name = co.co_name
result.append(ExtendedFrameSummary(
frame=f, filename=filename, lineno=lineno, name=name, lookup_line=False))
return result | 0.003192 |
def _chk_fld(self, ntd, name, qty_min=0, qty_max=None):
"""Further split a GAF value within a single field."""
vals = getattr(ntd, name)
num_vals = len(vals)
if num_vals < qty_min:
self.illegal_lines['MIN QTY'].append(
(-1, "FIELD({F}): MIN QUANTITY({Q}) WASN'T MET: {V}".format(F=name, Q=qty_min, V=vals)))
if qty_max is not None:
if num_vals > qty_max:
self.illegal_lines['MAX QTY'].append(
(-1, "FIELD({F}): MAX QUANTITY({Q}) EXCEEDED: {V}\n{NT}".format(
F=name, Q=qty_max, V=vals, NT=ntd))) | 0.006309 |
def advance_for_next_slice(self, recovery_slice=False):
"""Advance self for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
if recovery_slice:
self.slice_id += 2
else:
self.slice_id += 1 | 0.006237 |
def print_app_info (out=stderr):
"""Print system and application info (output defaults to stderr)."""
print(_("System info:"), file=out)
print(configuration.App, file=out)
print(_("Released on:"), configuration.ReleaseDate, file=out)
print(_("Python %(version)s on %(platform)s") %
{"version": sys.version, "platform": sys.platform}, file=out)
for key in PYTHON_ENV_VARS:
print_env_info(key, out=out)
print(configuration.get_modules_info(), file=out)
stime = strformat.strtime(time.time())
print(_("Local time:"), stime, file=out)
print(_("sys.argv:"), sys.argv, file=out) | 0.00625 |
def config():
'''
Shows the current configuration.
'''
config = get_config()
print('Client version: {0}'.format(click.style(__version__, bold=True)))
print('API endpoint: {0}'.format(click.style(str(config.endpoint), bold=True)))
print('API version: {0}'.format(click.style(config.version, bold=True)))
print('Access key: "{0}"'.format(click.style(config.access_key, bold=True)))
masked_skey = config.secret_key[:6] + ('*' * 24) + config.secret_key[-10:]
print('Secret key: "{0}"'.format(click.style(masked_skey, bold=True)))
print('Signature hash type: {0}'.format(
click.style(config.hash_type, bold=True)))
print('Skip SSL certificate validation? {0}'.format(
click.style(str(config.skip_sslcert_validation), bold=True))) | 0.003807 |
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False):
"""Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources
Parameters
----------
path : str
Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`).
Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://.
Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available.
Parser will be selected based on file extension.
cfg : dict or str, optional
Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output)
or str with path to YAML, that will be parsed.
Special keys:
**filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv'])
**default** : kwargs to be used for every file
**custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument
If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs.
silent : boolean, optional
If True, doesn't print to stdout.
verbose : boolean, optional
If True, prints parsing arguments for each file processed to stdout.
raise_on_error : boolean, optional
Raise exception or only display warning, if a file cannot be parsed successfully.
return_errors : boolean, optional
If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed.
Returns
-------
data : dict
Dictionary of parsed pandas.DataFrames, with file names as keys.
Notes
-----
- Start with basic cfg and tune until the desired parsing result is achieved.
- File extensions are critical to determine the parser, make sure they are *common*.
- Avoid files named 'default' or 'filters'.
- Avoid duplicate file names.
- Subfolders and file names beginning with '.' or '_' are ignored.
- If an https:// URI isn't correctly processed, try http:// instead.
- To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
"""
if type(cfg) == str:
cfg = os.path.abspath(os.path.expanduser(cfg))
yml = _read_yaml(cfg)
if yml == None:
if not silent:
print('creating read.yml config file draft ...')
cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}}
with open('local/read.yml', 'xt') as f:
yaml.dump(cfg, f)
yml = _read_yaml('local/read.yml')
if 'filters' in yml:
filters = yml['filters']
if type(filters) == str:
filters = [filters]
del yml['filters']
else:
filters = []
cfg = yml
data = {}
errors = []
if not silent:
print('processing', path, '...')
for f in _path_to_objs(path):
if type(f) == str:
fname = os.path.basename(f)
elif type(f) == zipfile.ZipExtFile:
fname = f.name
else:
raise RuntimeError('_path_to_objs() returned unknown type', f)
data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \
cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose)
if raise_on_error and data == {}:
raise IOError('path is invalid or empty')
if not silent:
print('imported {} DataFrames'.format(len(data)))
if len(data) > 0:
print('total memory usage: {}'.format(mem(data)))
if len(errors) > 0:
print('import errors in files: {}'.format(', '.join(errors)))
if return_errors:
return data, errors
else:
return data | 0.008214 |
def is_message_enabled(self, msg_descr, line=None, confidence=None):
"""return true if the message associated to the given message id is
enabled
msgid may be either a numeric or symbolic message id.
"""
if self.config.confidence and confidence:
if confidence.name not in self.config.confidence:
return False
try:
message_definitions = self.msgs_store.get_message_definitions(msg_descr)
msgids = [md.msgid for md in message_definitions]
except UnknownMessageError:
# The linter checks for messages that are not registered
# due to version mismatch, just treat them as message IDs
# for now.
msgids = [msg_descr]
for msgid in msgids:
if self.is_one_message_enabled(msgid, line):
return True
return False | 0.003326 |
def node_validate(node_dict, node_num, cmd_name):
"""Validate that command can be performed on target node."""
# cmd: [required-state, action-to-displayed, error-statement]
req_lu = {"run": ["stopped", "Already Running"],
"stop": ["running", "Already Stopped"],
"connect": ["running", "Can't Connect, Node Not Running"],
"details": [node_dict[node_num].state, ""]}
tm = {True: ("Node {1}{2}{0} ({5}{3}{0} on {1}{4}{0})".
format(C_NORM, C_WARN, node_num,
node_dict[node_num].name,
node_dict[node_num].cloud_disp, C_TI)),
False: req_lu[cmd_name][1]}
node_valid = bool(req_lu[cmd_name][0] == node_dict[node_num].state)
node_info = tm[node_valid]
return node_valid, node_info | 0.001225 |
def _get_branches(self):
"""Get branches from org/repo."""
if self.offline:
local_path = Path(LOCAL_PATH).expanduser() / self.org / self.repo
get_refs = f"git -C {shlex.quote(str(local_path))} show-ref --heads"
else:
get_refs = f"git ls-remote --heads https://github.com/{self.org}/{self.repo}"
try:
# Parse get_refs output for the actual branch names
return (line.split()[1].replace("refs/heads/", "") for line in _run(get_refs, timeout=3).split("\n"))
except Error:
return [] | 0.008489 |
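The branch names come from parsing `git ls-remote --heads` (or `git show-ref --heads`) output; a sketch of just that parsing step on made-up output:

sample_output = (    # made-up ls-remote output; the SHAs are not real
    "1a2b3c4d5e6f7a8b9c0d1a2b3c4d5e6f7a8b9c0d\trefs/heads/main\n"
    "0d9c8b7a6f5e4d3c2b1a0d9c8b7a6f5e4d3c2b1a\trefs/heads/develop"
)
branches = [line.split()[1].replace("refs/heads/", "")
            for line in sample_output.split("\n")]
print(branches)   # ['main', 'develop']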
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str,
show_examples):
"""Generate the documentation docstring for a PlotMethod"""
ret = docstrings.dedents("""
%s
This plotting method adds data arrays and plots them via
:class:`%s` plotters
To plot data from a netCDF file type::
>>> psy.plot.%s(%s)
%s""" % (summary, full_name, identifier, example_call, doc_str))
if show_examples:
ret += '\n\n' + cls._gen_examples(identifier)
return ret | 0.005008 |
def add_headers(vcf_obj, nr_cases=None, sv=False):
"""Add loqus specific information to a VCF header
Args:
vcf_obj(cyvcf2.VCF)
"""
vcf_obj.add_info_to_header(
{
'ID':"Obs",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observations for the variant"}
)
if not sv:
vcf_obj.add_info_to_header(
{
'ID':"Hom",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observed homozygotes"}
)
vcf_obj.add_info_to_header(
{
'ID':"Hem",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observed hemizygotes"}
)
if nr_cases:
case_header = "##NrCases={}".format(nr_cases)
vcf_obj.add_to_header(case_header)
# head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
return | 0.004826 |
def read_local_config(cfg):
""" Parses local config file for override values
Args:
:local_file (str): filename of local config file
Returns:
dict object of values contained in local config file
"""
try:
if os.path.exists(cfg):
config = import_file_object(cfg)
return config
else:
logger.warning(
'%s: local config file (%s) not found, cannot be read' %
(inspect.stack()[0][3], str(cfg)))
except IOError as e:
logger.warning(
'import_file_object: %s error opening %s' % (str(e), str(cfg))
)
return {} | 0.00152 |
def build(self, builder):
"""
Build XML by appending to builder
"""
if self.text is None:
raise ValueError("Text is not set.")
params = {}
if self.sponsor_or_site is not None:
params['SponsorOrSite'] = self.sponsor_or_site
builder.start("Comment", params)
builder.data(self.text)
builder.end("Comment") | 0.005013 |
def scheduled_status(self, id):
"""
Fetch information about the scheduled status with the given id.
Returns a `scheduled toot dict`_.
"""
id = self.__unpack_id(id)
url = '/api/v1/scheduled_statuses/{0}'.format(str(id))
return self.__api_request('GET', url) | 0.00639 |
def isCommaList(inputFilelist):
"""Return True if the input is a comma separated list of names."""
if isinstance(inputFilelist, int) or isinstance(inputFilelist, np.int32):
ilist = str(inputFilelist)
else:
ilist = inputFilelist
if "," in ilist:
return True
return False | 0.003195 |
def themes_path():
"""
Retrieve the location of the themes directory from the location of this package
This is taken from Sphinx's theme documentation
"""
package_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(package_dir, 'themes') | 0.007168 |
def rollback(self):
"""Abandon the current transaction.
Rollback all messages published during the current transaction
session to the remote server.
Note that all messages published during this transaction session
will be lost, and will have to be published again.
A new transaction session starts as soon as the command has
been executed.
:return:
"""
self._tx_active = False
return self._channel.rpc_request(specification.Tx.Rollback()) | 0.00361 |
def decrypt_var(source, passphrase=None):
"""Attempts to decrypt a variable"""
cmd = [gnupg_bin(), "--decrypt", gnupg_home(), gnupg_verbose(),
passphrase_file(passphrase)]
return stderr_with_input(flatten(cmd), source) | 0.004132 |
def next_haab(month, jd):
'''For a given haab month and a julian day count, find the next start of that month on or after the JDC'''
if jd < EPOCH:
raise IndexError("Input day is before Mayan epoch.")
hday, hmonth = to_haab(jd)
if hmonth == month:
days = 1 - hday
else:
count1 = _haab_count(hday, hmonth)
count2 = _haab_count(1, month)
# Find number of days between haab of given jd and desired haab
days = (count2 - count1) % 365
# add in the number of days and return new jd
return jd + days | 0.003484 |
def a_href_finder(pipeline_index,
soup,
finder_image_urls=[],
*args, **kwargs):
"""
Find image URL in <a>'s href attribute
"""
now_finder_image_urls = []
for a in soup.find_all('a'):
href = a.get('href', None)
if href:
href = str(href)
            if href.lower().endswith(('.jpg', '.jpeg', '.gif', '.png')):  # filter() would always be truthy on Python 3
if (href not in finder_image_urls) and \
(href not in now_finder_image_urls):
now_finder_image_urls.append(href)
output = {}
output['finder_image_urls'] = finder_image_urls + now_finder_image_urls
return output | 0.005747 |
def push_irq_registers(self):
"""
push PC, U, Y, X, DP, B, A, CC on System stack pointer
"""
self.cycles += 1
self.push_word(self.system_stack_pointer, self.program_counter.value) # PC
self.push_word(self.system_stack_pointer, self.user_stack_pointer.value) # U
self.push_word(self.system_stack_pointer, self.index_y.value) # Y
self.push_word(self.system_stack_pointer, self.index_x.value) # X
self.push_byte(self.system_stack_pointer, self.direct_page.value) # DP
self.push_byte(self.system_stack_pointer, self.accu_b.value) # B
self.push_byte(self.system_stack_pointer, self.accu_a.value) # A
self.push_byte(self.system_stack_pointer, self.get_cc_value()) | 0.014608 |
def diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
res = None
if summary2 is None:
self.s1 = self.create_summary()
if summary1 is None:
res = summary.get_diff(self.s0, self.s1)
else:
res = summary.get_diff(summary1, self.s1)
self.s0 = self.s1
else:
if summary1 is not None:
res = summary.get_diff(summary1, summary2)
else:
raise ValueError("You cannot provide summary2 without summary1.")
return summary._sweep(res) | 0.003311 |
def add_virtual_columns_cartesian_velocities_to_spherical(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", vlong="vlong", vlat="vlat", distance=None):
"""Concert velocities from a cartesian to a spherical coordinate system
TODO: errors
:param x: name of x column (input)
:param y: y
:param z: z
:param vx: vx
:param vy: vy
:param vz: vz
:param vr: name of the column for the radial velocity in the r direction (output)
:param vlong: name of the column for the velocity component in the longitude direction (output)
:param vlat: name of the column for the velocity component in the latitude direction, positive points to the north pole (output)
:param distance: Expression for distance, if not given defaults to sqrt(x**2+y**2+z**2), but if this column already exists, passing this expression may lead to a better performance
:return:
"""
# see http://www.astrosurf.com/jephem/library/li110spherCart_en.htm
if distance is None:
distance = "sqrt({x}**2+{y}**2+{z}**2)".format(**locals())
self.add_virtual_column(vr, "({x}*{vx}+{y}*{vy}+{z}*{vz})/{distance}".format(**locals()))
self.add_virtual_column(vlong, "-({vx}*{y}-{x}*{vy})/sqrt({x}**2+{y}**2)".format(**locals()))
self.add_virtual_column(vlat, "-({z}*({x}*{vx}+{y}*{vy}) - ({x}**2+{y}**2)*{vz})/( {distance}*sqrt({x}**2+{y}**2) )".format(**locals())) | 0.006575 |
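The same expressions evaluated directly with NumPy, outside any DataFrame, as a sanity check of the formulas (a point on the x-axis moving in +y should show purely longitudinal motion):

import numpy as np

def cart_to_spherical_velocity(x, y, z, vx, vy, vz):
    distance = np.sqrt(x**2 + y**2 + z**2)
    rxy = np.sqrt(x**2 + y**2)
    vr = (x*vx + y*vy + z*vz) / distance
    vlong = -(vx*y - x*vy) / rxy
    vlat = -(z*(x*vx + y*vy) - (x**2 + y**2)*vz) / (distance * rxy)
    return vr, vlong, vlat

print(cart_to_spherical_velocity(1.0, 0.0, 0.0, 0.0, 1.0, 0.0))   # ~ (0.0, 1.0, 0.0)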
def waypoint_clear_all_send(self):
'''wrapper for waypoint_clear_all_send'''
if self.mavlink10():
self.mav.mission_clear_all_send(self.target_system, self.target_component)
else:
self.mav.waypoint_clear_all_send(self.target_system, self.target_component) | 0.013245 |
def changelog_file_option_validator(ctx, param, value):
"""Checks that the given file path exists in the current working directory.
Returns a :class:`~pathlib.Path` object. If the file does not exist raises
a :class:`~click.UsageError` exception.
"""
path = Path(value)
if not path.exists():
filename = click.style(path.name, fg="blue", bold=True)
ctx.fail(
"\n"
f" {x_mark} Unable to find {filename}\n"
' Run "$ brau init" to create one'
)
return path | 0.001832 |
def _bytype(self, action_type, action_spec=None):
        '''Yield the matching actions, most recent first, whose type includes action_type.
        Action spec is a dictionary of key-value attrs to match.'''
for action in reversed(self.bill['actions']):
if action_type in action['type']:
for k, v in action_spec.items():
if action[k] == v:
yield action | 0.004854 |
def fit(self, X, y=None, categorical=None):
"""Compute k-prototypes clustering.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
categorical : Index of columns that contain categorical data
"""
if categorical is not None:
assert isinstance(categorical, (int, list, tuple)), "The 'categorical' \
argument needs to be an integer with the index of the categorical \
column in your data, or a list or tuple of several of them, \
but it is a {}.".format(type(categorical))
X = pandas_to_numpy(X)
random_state = check_random_state(self.random_state)
# If self.gamma is None, gamma will be automatically determined from
# the data. The function below returns its value.
self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\
self.n_iter_, self.gamma = k_prototypes(X,
categorical,
self.n_clusters,
self.max_iter,
self.num_dissim,
self.cat_dissim,
self.gamma,
self.init,
self.n_init,
self.verbose,
random_state,
self.n_jobs)
return self | 0.002334 |
def get_or_add_video_media_part(self, video):
"""Return rIds for media and video relationships to media part.
A new |MediaPart| object is created if it does not already exist
(such as would occur if the same video appeared more than once in
a presentation). Two relationships to the media part are created,
one each with MEDIA and VIDEO relationship types. The need for two
appears to be for legacy support for an earlier (pre-Office 2010)
PowerPoint media embedding strategy.
"""
media_part = self._package.get_or_add_media_part(video)
media_rId = self.relate_to(media_part, RT.MEDIA)
video_rId = self.relate_to(media_part, RT.VIDEO)
return media_rId, video_rId | 0.002632 |
def _validate_inputs(self, inputdict):
""" Validate input links.
"""
# Check inputdict
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this "
"calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters not of type "
"ParameterData")
# Check code
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this "
"calculation")
# Check input files
try:
structure = inputdict.pop(self.get_linkname('structure'))
if not isinstance(structure, SinglefileData):
raise InputValidationError(
"structure not of type SinglefileData")
except KeyError:
raise InputValidationError(
"No input structure specified for calculation")
try:
surface_sample = inputdict.pop(self.get_linkname('surface_sample'))
if not isinstance(surface_sample, SinglefileData):
raise InputValidationError(
"surface_sample not of type SinglefileData")
except KeyError:
raise InputValidationError(
"No surface sample specified for calculation")
# Check that nothing is left unparsed
if inputdict:
raise ValidationError("Unrecognized inputs: {}".format(inputdict))
return parameters, code, structure, surface_sample | 0.001129 |
def complexidade(obj):
"""
    Returns a value that indicates project health. Currently the value of the
    project's FinancialIndicator is used, but in the future it could be
    computed from other indicators as well.
"""
indicators = obj.indicator_set.all()
if not indicators:
value = 0.0
else:
value = indicators.first().value
return value | 0.002695 |
def swipe_top(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to top
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.up(steps=steps) | 0.008439 |
def make_skip_list(cts):
"""
Return hand-defined list of place names to skip and not attempt to geolocate. If users would like to exclude
country names, this would be the function to do it with.
"""
# maybe make these non-country searches but don't discard, at least for
# some (esp. bodies of water)
special_terms = ["Europe", "West", "the West", "South Pacific", "Gulf of Mexico", "Atlantic",
"the Black Sea", "Black Sea", "North America", "Mideast", "Middle East",
"the Middle East", "Asia", "the Caucasus", "Africa",
"Central Asia", "Balkans", "Eastern Europe", "Arctic", "Ottoman Empire",
"Asia-Pacific", "East Asia", "Horn of Africa", "Americas",
"North Africa", "the Strait of Hormuz", "Mediterranean", "East", "North",
"South", "Latin America", "Southeast Asia", "Western Pacific", "South Asia",
"Persian Gulf", "Central Europe", "Western Hemisphere", "Western Europe",
"European Union (E.U.)", "EU", "European Union", "E.U.", "Asia-Pacific",
"Europe", "Caribbean", "US", "U.S.", "Persian Gulf", "West Africa", "North", "East",
"South", "West", "Western Countries"
]
# Some words are recurring spacy problems...
spacy_problems = ["Kurd", "Qur'an"]
#skip_list = list(cts.keys()) + special_terms
skip_list = special_terms + spacy_problems
skip_list = set(skip_list)
return skip_list | 0.012829 |
def decrypt(self, text, appid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key, self.mode, self.key[:16])
            # Base64-decode the ciphertext, then decrypt it with AES-CBC
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception as e:
raise DecryptAESError(e)
try:
if six.PY2:
pad = ord(plain_text[-1])
else:
pad = plain_text[-1]
            # Strip the padding bytes
            # pkcs7 = PKCS7Encoder()
            # plain_text = pkcs7.encode(plain_text)
            # Drop the 16-byte random prefix
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I", content[: 4])[0])
xml_content = content[4: xml_len + 4]
from_appid = content[xml_len + 4:]
except Exception as e:
raise IllegalBuffer(e)
if from_appid != appid:
raise ValidateAppIDError()
return xml_content | 0.001959 |
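As an illustration of the layout the decrypt method above expects, here is a rough sketch of how such a plaintext buffer could be assembled: 16 random bytes, a 4-byte network-order length, the XML body, the appid, then padding. The 32-byte block size is an assumption, not taken from the source:
import os
import socket
import struct

def build_plaintext(xml: bytes, appid: bytes, block_size: int = 32) -> bytes:
    body = os.urandom(16) + struct.pack("I", socket.htonl(len(xml))) + xml + appid
    pad = block_size - (len(body) % block_size)
    return body + bytes([pad]) * pad   # PKCS#7-style padding, stripped by decrypt()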
def featurize(equity_data, n_sessions, **kwargs):
"""
Generate a raw (unnormalized) feature set from the input data.
    The value of the `selection` column on each date is taken
    as a feature, and each row contains values for n_sessions
Parameters
-----------
equity_data : DataFrame
data from which to generate features
n_sessions : int
number of sessions to use as features
selection : str, default: 'Adj Close'
column of `equity_data` from which to generate features.
columns : list, default: ``map(str, range((-n_sessions + 1), 1))``
column names for output DataFrame. Default will look like:
['-5', '-4', '-3', '-2', '-1', '0'].
Returns
----------
out : DataFrame
Each row is a sequence of `n_sessions` session values where
the last column matches the value on the date specified by
the DataFrame index.
Examples
--------
>>> pn.featurize(equity_data, n_sessions, **kwargs)
"""
#Benchmarking
#>>> s = 'from __main__ import data\nimport datetime as dt\n'
#>>> timeit.timeit('data.featurize(data.get("ge", dt.date(1960, 1, 1),
# dt.date(2014, 12, 31)), 256)', setup=s, number=1)
#1.6771750450134277
columns = kwargs.get('columns', map(str, range(-n_sessions + 1, 1)))
selection = kwargs.get('selection', 'Adj Close')
# empty DataFrame with desired index and column labels
features = pd.DataFrame(index=equity_data.index[(n_sessions - 1):],
columns=columns, dtype='float64')
values = equity_data[selection].values
for i in range(n_sessions - 1):
features.iloc[:, i] = values[i:(-n_sessions + i + 1)]
features.iloc[:, n_sessions - 1] = values[(n_sessions - 1):]
return features | 0.003926 |
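A small worked example of the rolling-window layout featurize produces; the price values are made up:
import pandas as pd

prices = pd.DataFrame(
    {"Adj Close": [10.0, 11.0, 12.0, 13.0, 14.0]},
    index=pd.date_range("2020-01-01", periods=5),
)
features = featurize(prices, 3)
# Each row holds 3 consecutive sessions ending on the row's date:
#             -2    -1     0
# 2020-01-03  10.0  11.0  12.0
# 2020-01-04  11.0  12.0  13.0
# 2020-01-05  12.0  13.0  14.0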
def _generate_exact_author_query(self, author_name_or_bai):
"""Generates a term query handling authors and BAIs.
Notes:
If given value is a BAI, search for the provided value in the raw field variation of
`ElasticSearchVisitor.AUTHORS_BAI_FIELD`.
            Otherwise, the value will be processed in the same way as the indexed value (i.e. lowercased and normalized
(inspire_utils.normalize_name and then NFKC normalization).
E.g. Searching for 'Smith, J.' is the same as searching for: 'Smith, J', 'smith, j.', 'smith j', 'j smith',
'j. smith', 'J Smith', 'J. Smith'.
"""
if ElasticSearchVisitor.BAI_REGEX.match(author_name_or_bai):
bai = author_name_or_bai.lower()
query = self._generate_term_query(
'.'.join((ElasticSearchVisitor.AUTHORS_BAI_FIELD, FieldVariations.search)),
bai
)
else:
author_name = normalize('NFKC', normalize_name(author_name_or_bai)).lower()
query = self._generate_term_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'],
author_name
)
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | 0.006126 |
def archive_handler(unused_build_context, target, fetch, package_dir, tar):
"""Handle remote downloadable archive URI.
    Download the archive and cache it under the private builder workspace
(unless already downloaded), extract it, and add the content to the
package tar.
TODO(itamar): Support re-downloading if remote changed compared to local.
TODO(itamar): Support more archive formats (currently only tarballs).
"""
package_dest = join(package_dir, basename(urlparse(fetch.uri).path))
package_content_dir = join(package_dir, 'content')
extract_dir = (join(package_content_dir, fetch.name)
if fetch.name else package_content_dir)
fetch_url(fetch.uri, package_dest, package_dir)
# TODO(itamar): Avoid repetition of splitting extension here and above
    # TODO(itamar): Don't use `extractall` on potentially untrusted archives
ext = splitext(package_dest)[-1].lower()
if ext in ('.gz', '.bz2', '.tgz'):
with tarfile.open(package_dest, 'r:*') as src_tar:
src_tar.extractall(extract_dir)
elif ext in ('.zip',):
with ZipFile(package_dest, 'r') as zipf:
zipf.extractall(extract_dir)
else:
raise ValueError('Unsupported extension {}'.format(ext))
tar.add(package_content_dir, arcname=split_name(target.name)) | 0.000746 |
def _get_node(nodes, node_id, fuzzy=True):
"""
Returns a dispatcher node that match the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict)
"""
try:
return node_id, nodes[node_id] # Return dispatcher node and its id.
except KeyError as ex:
if fuzzy:
it = sorted(nodes.items())
n = next(((k, v) for k, v in it if node_id in k), EMPTY)
if n is not EMPTY:
return n
raise ex | 0.001567 |
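A short illustration of the exact and fuzzy lookup paths in _get_node above, using a made-up nodes mapping:
nodes = {"engine_speed": {"type": "data"}, "engine_torque": {"type": "data"}}

print(_get_node(nodes, "engine_speed"))    # exact hit  -> ('engine_speed', {'type': 'data'})
print(_get_node(nodes, "torque"))          # fuzzy hit  -> ('engine_torque', {'type': 'data'})
# _get_node(nodes, "torque", fuzzy=False)  # would re-raise the original KeyError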
def get_response_query_template(service, operation):
"""refers to definition of API in botocore, and autogenerates template
Assume that response format is xml when protocol is query
You can see example of elbv2 from link below.
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
"""
client = boto3.client(service)
aws_operation_name = to_upper_camel_case(operation)
op_model = client._service_model.operation_model(aws_operation_name)
result_wrapper = op_model.output_shape.serialization['resultWrapper']
response_wrapper = result_wrapper.replace('Result', 'Response')
metadata = op_model.metadata
xml_namespace = metadata['xmlNamespace']
# build xml tree
t_root = etree.Element(response_wrapper, xmlns=xml_namespace)
# build metadata
t_metadata = etree.Element('ResponseMetadata')
t_request_id = etree.Element('RequestId')
t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE'
t_metadata.append(t_request_id)
t_root.append(t_metadata)
# build result
t_result = etree.Element(result_wrapper)
outputs = op_model.output_shape.members
replace_list = []
for output_name, output_shape in outputs.items():
t_result.append(_get_subtree(output_name, output_shape, replace_list))
t_root.append(t_result)
xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8')
xml_body_lines = xml_body.splitlines()
for replace in replace_list:
name = replace[0]
prefix = replace[1]
singular_name = singularize(name)
start_tag = '<%s>' % name
        iter_name = '{}.{}'.format(prefix[-1], name.lower()) if prefix else name.lower()
loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name)
end_tag = '</%s>' % name
        loop_end = '{% endfor %}'  # matches the '{% for %}' opening tag above
start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l]
if len(start_tag_indexes) != 1:
raise Exception('tag %s not found in response body' % start_tag)
start_tag_index = start_tag_indexes[0]
xml_body_lines.insert(start_tag_index + 1, loop_start)
end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l]
if len(end_tag_indexes) != 1:
raise Exception('tag %s not found in response body' % end_tag)
end_tag_index = end_tag_indexes[0]
xml_body_lines.insert(end_tag_index, loop_end)
xml_body = '\n'.join(xml_body_lines)
body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body)
return body | 0.00191 |
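A hedged usage sketch for the generator above; it needs botocore service data on the machine and, because of the boto3.client call, a resolvable AWS region, so the output is only indicated here:
template_source = get_response_query_template("elbv2", "describe_load_balancers")
print(template_source)   # -> DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse ...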
def get_parent_logs(self, log_id):
"""Gets the parent logs of the given ``id``.
arg: log_id (osid.id.Id): the ``Id`` of the ``Log`` to query
return: (osid.logging.LogList) - the parent logs of the ``id``
        raise: NotFound - a ``Log`` identified by ``Id`` is not found
raise: NullArgument - ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=log_id)
return LogLookupSession(
self._proxy,
self._runtime).get_logs_by_ids(
list(self.get_parent_log_ids(log_id))) | 0.002132 |
def staff_member(view_func):
"""Performs user authentication check.
Similar to Django's `login_required` decorator, except that this throws
:exc:`~leonardo.exceptions.NotAuthenticated` exception if the user is not
signed-in.
"""
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_staff:
return view_func(request, *args, **kwargs)
raise PermissionDenied(_("You haven't permissions to do this action."))
return dec | 0.001845 |
def from_zone(zone):
'''Build a GeoID from a given zone'''
validity = zone.validity.start if zone.validity else None
return build(zone.level, zone.code, validity) | 0.005747 |
def md5hash(self):
"""Return the MD5 hash string of the file content"""
digest = hashlib.md5(self.content).digest()
return b64_string(digest) | 0.012121 |
def copy(value, **kwargs):
"""Return a copy of a **HasProperties** instance
A copy is produced by serializing the HasProperties instance then
deserializing it to a new instance. Therefore, if any properties
cannot be serialized/deserialized, :code:`copy` will fail. Any
keyword arguments will be passed through to both :code:`serialize`
and :code:`deserialize`.
"""
if not isinstance(value, HasProperties):
        raise ValueError('properties.copy may only be used to copy '
                         'HasProperties instances')
kwargs.update({'include_class': kwargs.get('include_class', True)})
kwargs.update({'trusted': kwargs.get('trusted', True)})
return value.__class__.deserialize(value.serialize(**kwargs), **kwargs) | 0.001302 |
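A minimal usage sketch of copy above, assuming the `properties` package that provides HasProperties; the Box class and its property are illustrative:
import properties

class Box(properties.HasProperties):
    width = properties.Float("width of the box")

original = Box(width=2.5)
duplicate = copy(original)
assert duplicate is not original and duplicate.width == 2.5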
def write(self, value):
"""Write a (new) value to this variable."""
assert self.num_write_waits > 0, self
self.num_write_waits -= 1
self.values.append(value)
if self.readable:
LOG.debug('%s is now readable', self.name) | 0.007407 |
def resp_set_location(self, resp, location=None):
"""Default callback for get_location/set_location
"""
if location:
self.location=location
elif resp:
self.location=resp.label.decode().replace("\x00", "") | 0.015385 |
def _check_err(resp, url_suffix, data, allow_pagination):
"""
Raise DataServiceError if the response wasn't successful.
:param resp: requests.Response back from the request
:param url_suffix: str url to include in an error message
:param data: data payload we sent
:param allow_pagination: when False and response headers contains 'x-total-pages' raises an error.
:return: requests.Response containing the successful result
"""
total_pages = resp.headers.get('x-total-pages')
if not allow_pagination and total_pages:
raise UnexpectedPagingReceivedError()
if 200 <= resp.status_code < 300:
return resp
if resp.status_code == 404:
if resp.json().get("code") == "resource_not_consistent":
raise DSResourceNotConsistentError(resp, url_suffix, data)
raise DataServiceError(resp, url_suffix, data) | 0.003168 |
def get_agents_with_name(name, stmts):
"""Return all agents within a list of statements with a particular name."""
return [ag for stmt in stmts for ag in stmt.agent_list()
if ag is not None and ag.name == name] | 0.004348 |
def detect_unicode_support():
''' Try to detect unicode (utf8?) support in the terminal.
Experimental, implementation idea is from the link below:
https://unix.stackexchange.com/questions/184345/detect-how-much-of-unicode-my-terminal-supports-even-through-screen
TODO:
needs improvement.
# should return None or True on redirection?
Returns:
Boolean | None if not a TTY
'''
result = None
if env.LANG and env.LANG.endswith('UTF-8'): # approximation
result = True
elif is_a_tty():
if os_name == 'nt':
from .windows import get_position as _get_position
else:
_get_position = get_position
out = sys.stdout
# what if cursor is not at beginning of line?
x, _ = _get_position()
out.write('é')
out.flush()
x2, _ = _get_position()
difference = x2 - x
if difference == 1:
result = True
else:
result = False # 0, 2 - no
# clean up
out.write(BS)
out.flush()
return result | 0.001765 |
def _update_workflow_stages(stage_data: dict, workflow_stage: WorkflowStage,
docker: DockerSwarmClient):
"""Check and update the status of a workflow stage.
This function checks and updates the status of a workflow stage
specified by the parameters in the specified stage_data dictionary.
If the workflow stage is not marked as complete, this function will
check with the Docker Swarm API on the status of Docker services
defined for the stage. If **all** services are found to be complete
(based on their service state being reported as 'shutdown',
the workflow stage is marked complete.
This function is used by `execute_processing_block`.
TODO(BMo) This function will need refactoring at some point as part
of an update to the way workflow state metadata is stored in the
configuration database. Currently the stage_data dictionary
is a bit of a hack for a badly specified Configuration Database
backed WorkflowStage object.
Args:
stage_data (dict): Dictionary holding workflow stage metadata.
workflow_stage (WorkflowStage): Workflow stage data object.
docker (DockerClient): Docker Swarm Client object.
"""
service_status_complete = []
# FIXME(BMo) is not "complete" -> is "running"
if stage_data["status"] != "complete":
for service_id, service_dict in stage_data['services'].items():
service_state = docker.get_service_state(service_id)
if service_state == 'shutdown':
docker.delete_service(service_id)
service_dict['status'] = service_state
service_dict['complete'] = (service_state == 'shutdown')
service_status_complete.append(service_dict['complete'])
if all(service_status_complete):
LOG.info('Workflow stage service %s complete!',
workflow_stage.id)
stage_data['status'] = "complete" | 0.0005 |
def extend_safe(target, source):
"""
    Extends the target list with elements from the source list, skipping any element that already exists in the target.
:param target:
:type target: list
:param source:
:type source: list
"""
for elt in source:
if elt not in target:
target.append(elt) | 0.006601 |
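A quick illustration of extend_safe above:
existing = ["a", "b"]
extend_safe(existing, ["b", "c", "a", "d"])
print(existing)   # ['a', 'b', 'c', 'd'] -- duplicates from source are skipped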
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
_PY3=PY3, _maxunicode=sys.maxunicode):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not _PY3 and not isinstance(content, text_type):
content = text_type(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at"
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\X escape sequence %r"
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
msg = "Invalid \\uXXXX escape sequence"
esc = s[end + 1:end + 5]
escX = esc[1:2]
if len(esc) != 4 or escX == 'x' or escX == 'X':
raise JSONDecodeError(msg, s, end - 1)
try:
uni = int(esc, 16)
except ValueError:
raise JSONDecodeError(msg, s, end - 1)
end += 5
# Check for surrogate pair on UCS-4 systems
# Note that this will join high/low surrogate pairs
# but will also pass unpaired surrogates through
if (_maxunicode > 65535 and
uni & 0xfc00 == 0xd800 and
s[end:end + 2] == '\\u'):
esc2 = s[end + 2:end + 6]
escX = esc2[1:2]
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
try:
uni2 = int(esc2, 16)
except ValueError:
raise JSONDecodeError(msg, s, end)
if uni2 & 0xfc00 == 0xdc00:
uni = 0x10000 + (((uni - 0xd800) << 10) |
(uni2 - 0xdc00))
end += 6
char = unichr(uni)
# Append the unescaped character
_append(char)
return _join(chunks), end | 0.001149 |
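A worked example of py_scanstring above: `end` is the index just after the opening quote, and the return value is the decoded text together with the index just past the closing quote:
raw = '"caf\\u00e9" : 1'
text, end = py_scanstring(raw, 1)
print(text, end)   # café 11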