text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def _child_allowed(self, child_rule):
"""Called to verify that the given rule can become a child of the
current node.
:raises AttributeError:
if the child is not allowed
"""
num_kids = self.node.children.count()
num_kids_allowed = len(self.rule.children)
if not self.rule.multiple_paths:
num_kids_allowed = 1
if num_kids >= num_kids_allowed:
raise AttributeError('Rule %s only allows %s children' % (
self.rule_name, num_kids_allowed))
# verify not a duplicate
for node in self.node.children.all():
if node.data.rule_label == child_rule.class_label:
raise AttributeError('Child rule already exists')
# check if the given rule is allowed as a child
if child_rule not in self.rule.children:
raise AttributeError('Rule %s is not a valid child of Rule %s' % (
child_rule.__name__, self.rule_name)) | 0.003964 |
def scan(self, ids=range(254)):
""" Pings all ids within the specified list; by default it finds all the motors connected to the bus. """
return [id for id in ids if self.ping(id)] | 0.015306 |
def close_thread_handles(self):
"""
Closes all open handles to threads in the snapshot.
"""
for aThread in self.iter_threads():
try:
aThread.close_handle()
except Exception:
try:
e = sys.exc_info()[1]
msg = "Cannot close thread handle %s, reason: %s"
msg %= (aThread.hThread.value, str(e))
warnings.warn(msg)
except Exception:
pass | 0.003738 |
def apply_types(use_types, guess_type, line):
"""Apply the types on the elements of the line"""
new_line = {}
for k, v in line.items():
if k in use_types:
new_line[k] = force_type(use_types[k], v)
elif guess_type:
new_line[k] = determine_type(v)
else:
new_line[k] = v
return new_line | 0.005479 |
def json_export(self, dest, fieldnames=None, encoding="UTF-8"):
"""Exports the contents of the table to a JSON-formatted file.
@param dest: output file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written to as-is, and left open for the caller to close.
@type dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string
"""
close_on_exit = False
if isinstance(dest, basestring):
if PY_3:
dest = open(dest, 'w', encoding=encoding)
else:
dest = open(dest, 'w')
close_on_exit = True
try:
if isinstance(fieldnames, basestring):
fieldnames = fieldnames.split()
if fieldnames is None:
do_all(dest.write(_to_json(o)+'\n') for o in self.obs)
else:
do_all(dest.write(json.dumps(ODict((f, getattr(o, f)) for f in fieldnames))+'\n') for o in self.obs)
finally:
if close_on_exit:
dest.close() | 0.007092 |
def pop_empty_columns(self, empty=None):
"""
This will pop columns from the printed columns if they only contain
'' or None
:param empty: list of values to treat as empty
"""
empty = ['', None] if empty is None else empty
if len(self) == 0:
return
for col in list(self.columns):
if self[0][col] in empty:
if not [v for v in self.get_column(col) if v not in empty]:
self.pop_column(col) | 0.003868 |
def migrate_keys(self, host, port, keys, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
"""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not isinstance(keys, (list, tuple)):
raise TypeError("keys argument must be list or tuple")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater than or equal to 0")
if timeout < 0:
raise ValueError("timeout must be greater than or equal to 0")
if not keys:
raise ValueError("keys must not be empty")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
flags.append(b'KEYS')
flags.extend(keys)
fut = self.execute(b'MIGRATE', host, port,
"", dest_db, timeout, *flags)
return wait_ok(fut) | 0.002285 |
def uncrop(data, crinfo, orig_shape, resize=False, outside_mode="constant", cval=0):
"""
Place cropped data back into an image of the original shape, padding the boundary.
:param data: input data
:param crinfo: array with minimum and maximum index along each axis
[[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed into [0, 0, 0].
If crinfo is just series of three numbers, it is used as an initial point for input image placement.
:param orig_shape: shape of uncropped image
:param resize: True or False (default). Useful if data.shape does not fit the crinfo shape.
:param outside_mode: 'constant', 'nearest'
:return: uncropped image with shape orig_shape
"""
if crinfo is None:
crinfo = list(zip([0] * data.ndim, orig_shape))
elif np.asarray(crinfo).size == data.ndim:
crinfo = list(zip(crinfo, np.asarray(crinfo) + data.shape))
crinfo = fix_crinfo(crinfo)
data_out = np.ones(orig_shape, dtype=data.dtype) * cval
# print 'uncrop ', crinfo
# print orig_shape
# print data.shape
if resize:
data = resize_to_shape(data, crinfo[:, 1] - crinfo[:, 0])
startx = np.round(crinfo[0][0]).astype(int)
starty = np.round(crinfo[1][0]).astype(int)
startz = np.round(crinfo[2][0]).astype(int)
data_out[
# np.round(crinfo[0][0]).astype(int):np.round(crinfo[0][1]).astype(int)+1,
# np.round(crinfo[1][0]).astype(int):np.round(crinfo[1][1]).astype(int)+1,
# np.round(crinfo[2][0]).astype(int):np.round(crinfo[2][1]).astype(int)+1
startx : startx + data.shape[0],
starty : starty + data.shape[1],
startz : startz + data.shape[2],
] = data
if outside_mode == "nearest":
# for ax in range(data.ndims):
# ax = 0
# copy border slice to pixels out of boundary - the higher part
for ax in range(data.ndim):
# the part under the crop
start = np.round(crinfo[ax][0]).astype(int)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = start
repeated_slice = np.expand_dims(data_out[slices], ax)
append_sz = start
if append_sz > 0:
tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = slice(None, start)
# data_out[start + data.shape[ax] : , :, :] = tile0
data_out[slices] = tile0
# plt.imshow(np.squeeze(repeated_slice))
# plt.show()
# the part over the crop
start = np.round(crinfo[ax][0]).astype(int)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = start + data.shape[ax] - 1
repeated_slice = np.expand_dims(data_out[slices], ax)
append_sz = data_out.shape[ax] - (start + data.shape[ax])
if append_sz > 0:
tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = slice(start + data.shape[ax], None)
# data_out[start + data.shape[ax] : , :, :] = tile0
data_out[slices] = tile0
# plt.imshow(np.squeeze(repeated_slice))
# plt.show()
return data_out | 0.002393 |
def OnMouseWheel(self, event):
"""Event handler for mouse wheel actions
Invokes zoom when the mouse wheel is turned while Ctrl is pressed
"""
if event.ControlDown():
if event.WheelRotation > 0:
post_command_event(self.grid, self.grid.ZoomInMsg)
else:
post_command_event(self.grid, self.grid.ZoomOutMsg)
elif self.main_window.IsFullScreen():
if event.WheelRotation > 0:
newtable = self.grid.current_table - 1
else:
newtable = self.grid.current_table + 1
post_command_event(self.grid, self.GridActionTableSwitchMsg,
newtable=newtable)
return
else:
wheel_speed = config["mouse_wheel_speed_factor"]
x, y = self.grid.GetViewStart()
direction = wheel_speed if event.GetWheelRotation() < 0 \
else -wheel_speed
if event.ShiftDown():
# Scroll sideways if shift is pressed.
self.grid.Scroll(x + direction, y)
else:
self.grid.Scroll(x, y + direction) | 0.001712 |
def __lstring(self,lstr):
"""
Returns a parsed lstring by stripping out any instances of
the escaped delimiter. Sometimes the raw lstring has whitespace
and a double quote at the beginning or end. If present, these
are removed.
"""
lstr = self.llsrx.sub('',lstr.encode('ascii'))
lstr = self.rlsrx.sub('',lstr)
lstr = self.xmltostr.xlat(lstr)
lstr = self.dlmrx.sub(',',lstr)
return lstr | 0.011655 |
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft or if it needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
new_post = post_id is None
post_id = _as_int(post_id)
current_datetime = datetime.datetime.utcnow()
draft = 1 if draft is True else 0
post_date = post_date if post_date is not None else current_datetime
last_modified_date = last_modified_date if last_modified_date is not \
None else current_datetime
with self._engine.begin() as conn:
try:
if post_id is not None: # validate post_id
exists_statement = sqla.select([self._post_table]).where(
self._post_table.c.id == post_id)
exists = \
conn.execute(exists_statement).fetchone() is not None
post_id = post_id if exists else None
post_statement = \
self._post_table.insert() if post_id is None else \
self._post_table.update().where(
self._post_table.c.id == post_id)
post_statement = post_statement.values(
title=title, text=text, post_date=post_date,
last_modified_date=last_modified_date, draft=draft
)
post_result = conn.execute(post_statement)
post_id = post_result.inserted_primary_key[0] \
if post_id is None else post_id
self._save_tags(tags, post_id, conn)
self._save_user_post(user_id, post_id, conn)
except Exception as e:
self._logger.exception(str(e))
post_id = None
return post_id | 0.001282 |
def poke_16(library, session, address, data):
"""Write a 16-bit value to the specified address.
Corresponds to viPoke16 function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value to.
:param data: value to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
"""
return library.viPoke16(session, address, data) | 0.001862 |
def create_modules_toc_file(master_package, modules, opts, name='modules'):
"""
Create the module's index.
"""
text = format_heading(1, '%s Modules' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts) | 0.001776 |
def urlencode_utf8(params):
"""
UTF-8 safe variant of urllib.urlencode.
http://stackoverflow.com/a/8152242
"""
if hasattr(params, 'items'):
params = params.items()
params = (
'='.join((
quote_plus(k.encode('utf8'), safe='/'),
quote_plus(v.encode('utf8'), safe='/')
)) for k, v in params
)
return '&'.join(params) | 0.002551 |
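The helper above mirrors urllib.urlencode for UTF-8 data. A minimal standalone sketch of the same encoding, assuming Python 3 where quote_plus lives in urllib.parse, could look like this:
# Hedged usage sketch, not part of the library: the same encoding done inline.
from urllib.parse import quote_plus

params = {"q": "caf\u00e9", "lang": "fr"}
encoded = "&".join(
    "=".join((quote_plus(k.encode("utf8"), safe="/"),
              quote_plus(v.encode("utf8"), safe="/")))
    for k, v in params.items()
)
print(encoded)  # q=caf%C3%A9&lang=fr (order follows dict insertion order)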
def _add_sample_measure(self, measure_params, num_samples):
"""Generate memory samples from current statevector.
Args:
measure_params (list): List of (qubit, cmembit) values for
measure instructions to sample.
num_samples (int): The number of memory samples to generate.
Returns:
list: A list of memory values in hex format.
"""
# Get unique qubits that are actually measured
measured_qubits = list({qubit for qubit, cmembit in measure_params})
num_measured = len(measured_qubits)
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
for qubit in reversed(measured_qubits):
# Remove from largest qubit to smallest so list position is correct
# with respect to position from end of the list
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.reshape(np.sum(np.abs(self._statevector) ** 2,
axis=tuple(axis)),
2 ** num_measured)
# Generate samples on measured qubits
samples = self._local_random.choice(range(2 ** num_measured),
num_samples, p=probabilities)
# Convert to bit-strings
memory = []
for sample in samples:
classical_memory = self._classical_memory
for count, (qubit, cmembit) in enumerate(sorted(measure_params)):
qubit_outcome = int((sample & (1 << count)) >> count)
membit = 1 << cmembit
classical_memory = (classical_memory & (~membit)) | (qubit_outcome << cmembit)
value = bin(classical_memory)[2:]
memory.append(hex(int(value, 2)))
return memory | 0.001601 |
def scan_line(self, line, regex):
"""Checks if regex is in line, returns bool"""
return bool(re.search(regex, line, flags=re.IGNORECASE)) | 0.013072 |
def _pre_heat_deploy(self):
"""Setup before the Heat stack create or update has been done."""
clients = self.app.client_manager
compute_client = clients.compute
self.log.debug("Checking hypervisor stats")
if utils.check_hypervisor_stats(compute_client) is None:
raise exceptions.DeploymentError(
"Expected hypervisor stats not met")
return True | 0.004751 |
def fit_transform(self, data):
"""
Fits and transforms the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, transform
"""
self._setup_from_data(data)
ret = self.transform_chain.fit_transform(data)
self.__proxy__.update({"fitted" : True})
return ret | 0.005068 |
def ssn(self, min_age=18, max_age=90):
"""
Returns a 10 digit Swedish SSN, "Personnummer".
It consists of 10 digits in the form YYMMDD-SSSQ, where
YYMMDD is the date of birth, SSS is a serial number
and Q is a control character (Luhn checksum).
http://en.wikipedia.org/wiki/Personal_identity_number_(Sweden)
"""
def _luhn_checksum(number):
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(number)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = 0
checksum += sum(odd_digits)
for d in even_digits:
checksum += sum(digits_of(d * 2))
return checksum % 10
def _calculate_luhn(partial_number):
check_digit = _luhn_checksum(int(partial_number) * 10)
return check_digit if check_digit == 0 else 10 - check_digit
age = datetime.timedelta(
days=self.generator.random.randrange(min_age * 365, max_age * 365))
birthday = datetime.datetime.now() - age
pnr_date = birthday.strftime('%y%m%d')
suffix = str(self.generator.random.randrange(0, 999)).zfill(3)
luhn_checksum = str(_calculate_luhn(pnr_date + suffix))
pnr = '{0}-{1}{2}'.format(pnr_date, suffix, luhn_checksum)
return pnr | 0.001425 |
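The Luhn step above can be exercised on its own. A standalone sketch of the same check-digit calculation (mirroring _calculate_luhn, not the generator's exact helpers):
# Standalone Luhn check-digit sketch; assumes the same algorithm as above.
def luhn_check_digit(partial_number):
    digits = [int(d) for d in str(int(partial_number) * 10)]
    odd = digits[-1::-2]
    even = digits[-2::-2]
    checksum = (sum(odd) + sum(sum(divmod(d * 2, 10)) for d in even)) % 10
    return 0 if checksum == 0 else 10 - checksum

# Birth date 1990-01-01 with serial 123 gives check digit 9 -> "900101-1239"
print(luhn_check_digit("900101" + "123"))  # 9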
def store(self, t, step):
"""
Record the state/algeb values at time t to self.vars
"""
max_cache = int(self.system.tds.config.max_cache)
if len(self.vars) >= max_cache > 0:
self.dump()
self.vars = list()
self.t = list()
self.k = list()
logger.debug(
'varout cache cleared at simulation t = {:g}.'.format(
self.system.dae.t))
self._mode = 'a'
var_data = matrix([self.system.dae.x, self.system.dae.y])
# ===== This code block is deprecated =====
self.t.append(t)
self.k.append(step)
self.vars.append(var_data)
# =========================================
# clear data cache if written to disk
if self.np_nrows >= max_cache > 0:
self.dump_np_vars()
self.np_vars = np.zeros(self._np_block_shape)
self.np_nrows = 0
self.np_t = np.zeros((self._np_block_rows,))
self.np_k = np.zeros((self._np_block_rows,))
logger.debug(
'np_vars cache cleared at simulation t = {:g}.'.format(
self.system.dae.t))
self._mode = 'a'
# initialize before first-time adding data
if self.np_nrows == 0:
self.np_ncols = len(var_data)
self._np_block_shape = (self._np_block_rows, self.np_ncols)
self.np_vars = np.zeros(self._np_block_shape)
self.np_t = np.zeros((self._np_block_rows,))
self.np_k = np.zeros((self._np_block_rows,))
# adding data to the matrix
# self.np_vars[self.np_nrows, 0] = t
self.np_t[self.np_nrows] = t
self.np_k[self.np_nrows] = step
self.np_vars[self.np_nrows, :] = np.array(var_data).reshape((-1))
self.np_nrows += 1
# check if matrix extension is needed
if self.np_nrows >= self.np_vars.shape[0]:
self.np_vars = np.concatenate([self.np_vars, np.zeros(self._np_block_shape)], axis=0)
self.np_t = np.concatenate([self.np_t, np.zeros((self._np_block_rows,))], axis=0)
self.np_k = np.concatenate([self.np_k, np.zeros((self._np_block_rows,))], axis=0)
# remove the post-computed variables from the variable list
if self.system.tds.config.compute_flows:
self.system.dae.y = self.system.dae.y[:self.system.dae.m] | 0.002048 |
def archive_context(filename):
"""
Unzip filename to a temporary directory, set to the cwd.
The unzipped target is cleaned up after.
"""
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
try:
with ContextualZipFile(filename) as archive:
archive.extractall()
except zipfile.BadZipfile as err:
if not err.args:
err.args = ('', )
err.args = err.args + (
MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename),
)
raise
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir) | 0.001138 |
def calc_constitutive_matrix(self):
"""Calculates the laminate constitutive matrix
This is the commonly called ``ABD`` matrix with ``shape=(6, 6)`` when
the classical laminated plate theory is used, or the ``ABDE`` matrix
when the first-order shear deformation theory is used, containing the
transverse shear terms.
"""
self.A_general = np.zeros([5,5], dtype=np.float64)
self.B_general = np.zeros([5,5], dtype=np.float64)
self.D_general = np.zeros([5,5], dtype=np.float64)
lam_thick = sum([ply.h for ply in self.plies])
self.h = lam_thick
h0 = -lam_thick/2 + self.offset
for ply in self.plies:
hk_1 = h0
h0 += ply.h
hk = h0
self.A_general += ply.QL*(hk - hk_1)
self.B_general += 1/2.*ply.QL*(hk**2 - hk_1**2)
self.D_general += 1/3.*ply.QL*(hk**3 - hk_1**3)
self.A = self.A_general[0:3, 0:3]
self.B = self.B_general[0:3, 0:3]
self.D = self.D_general[0:3, 0:3]
self.E = self.A_general[3:5, 3:5]
conc1 = np.concatenate([self.A, self.B], axis=1)
conc2 = np.concatenate([self.B, self.D], axis=1)
self.ABD = np.concatenate([conc1, conc2], axis=0)
self.ABDE = np.zeros((8, 8), dtype=np.float64)
self.ABDE[0:6, 0:6] = self.ABD
self.ABDE[6:8, 6:8] = self.E | 0.003546 |
def in_region(rname, rstart, target_chr, target_start, target_end):
"""
Quick check if a point is within the target region.
"""
return (rname == target_chr) and \
(target_start <= rstart <= target_end) | 0.004386 |
def collect_population_best(self, best_chromosome, best_fitness_function):
"""!
@brief Stores the best chromosome for current specific iteration and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome on specific iteration.
@param[in] best_fitness_function (float): Fitness function value of the chromosome.
"""
if not self._need_population_best:
return
self._best_population_result['chromosome'].append(best_chromosome)
self._best_population_result['fitness_function'].append(best_fitness_function) | 0.012739 |
def handle(self, *args, **options):
"""
get all the triggers that need to be handled
"""
from django.db import connection
connection.close()
failed_tries = settings.DJANGO_TH.get('failed_tries', 10)
trigger = TriggerService.objects.filter(
Q(provider_failed__lte=failed_tries) |
Q(consumer_failed__lte=failed_tries),
status=True,
user__is_active=True,
provider__name__status=True,
consumer__name__status=True,
).select_related('consumer__name', 'provider__name')
try:
with Pool(processes=settings.DJANGO_TH.get('processes')) as pool:
p = Pub()
result = pool.map_async(p.publishing, trigger)
result.get(timeout=60)
except TimeoutError as e:
logger.warning(e) | 0.00226 |
def K(self):
"""Kernel matrix
Returns
-------
K : array-like, shape=[n_samples, n_samples]
kernel matrix defined as the adjacency matrix with
ones down the diagonal
"""
try:
return self._kernel
except AttributeError:
self._kernel = self._build_kernel()
return self._kernel | 0.005141 |
def from_jd(jd):
'''Return Gregorian date in a (Y, M, D) tuple'''
wjd = floor(jd - 0.5) + 0.5
depoch = wjd - EPOCH
quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
dqc = depoch % INTERCALATION_CYCLE_DAYS
cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
dcent = dqc % LEAP_SUPPRESSION_DAYS
quad = floor(dcent / LEAP_CYCLE_DAYS)
dquad = dcent % LEAP_CYCLE_DAYS
yindex = floor(dquad / YEAR_DAYS)
year = (
quadricent * INTERCALATION_CYCLE_YEARS +
cent * LEAP_SUPPRESSION_YEARS +
quad * LEAP_CYCLE_YEARS + yindex
)
if not (cent == 4 or yindex == 4):
year += 1
yearday = wjd - to_jd(year, 1, 1)
leap = isleap(year)
if yearday < 58 + leap:
leap_adj = 0
elif leap:
leap_adj = 1
else:
leap_adj = 2
month = floor((((yearday + leap_adj) * 12) + 373) / 367)
day = int(wjd - to_jd(year, month, 1)) + 1
return (year, month, day) | 0.00104 |
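As a sanity check, Julian day 2451544.5 is 2000-01-01 in the proleptic Gregorian calendar, so a round trip with the module's to_jd should agree (assuming the module's EPOCH and cycle constants follow the standard Gregorian epoch):
# Hedged round-trip check; relies on the module's own to_jd/from_jd.
assert from_jd(2451544.5) == (2000, 1, 1)
assert to_jd(2000, 1, 1) == 2451544.5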
def from_jd(jd):
'''Calculate Indian Civil date from Julian day
Offset in years from Saka era to Gregorian epoch'''
start = 80
# Day offset between Saka and Gregorian
jd = trunc(jd) + 0.5
greg = gregorian.from_jd(jd) # Gregorian date for Julian day
leap = isleap(greg[0]) # Is this a leap year?
# Tentative year in Saka era
year = greg[0] - SAKA_EPOCH
# JD at start of Gregorian year
greg0 = gregorian.to_jd(greg[0], 1, 1)
yday = jd - greg0 # Day number (0 based) in Gregorian year
if leap:
Caitra = 31 # Days in Caitra this year
else:
Caitra = 30
if yday < start:
# Day is at the end of the preceding Saka year
year -= 1
yday += Caitra + (31 * 5) + (30 * 3) + 10 + start
yday -= start
if yday < Caitra:
month = 1
day = yday + 1
else:
mday = yday - Caitra
if (mday < (31 * 5)):
month = trunc(mday / 31) + 2
day = (mday % 31) + 1
else:
mday -= 31 * 5
month = trunc(mday / 30) + 7
day = (mday % 30) + 1
return (year, month, int(day)) | 0.000864 |
def get_event_attendees(self, id, **data):
"""
GET /events/:id/attendees/
Returns a :ref:`paginated <pagination>` response with a key of ``attendees``, containing a list of :format:`attendee`.
"""
return self.get("/events/{0}/attendees/".format(id), data=data) | 0.012945 |
def _load_text(handle, split=False, encoding="utf-8"):
"""Load and decode a string."""
string = handle.read().decode(encoding)
return string.splitlines() if split else string | 0.005376 |
def add(self, date_range, library_name):
"""
Adds the library with the given date range to the underlying collection of libraries used by this store.
The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function
and the rest of the class.
Arguments:
date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying
libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end
would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries,
that is the start must be at midnight and the end must be 1 millisecond before midnight.
library_name: The name of the underlying library. This must be the name of a valid Arctic library
"""
# check that the library is valid
try:
self._arctic_lib.arctic[library_name]
except Exception as e:
logger.error("Could not load library")
raise e
assert date_range.start and date_range.end, "Date range should have start and end properties {}".format(date_range)
start = date_range.start.astimezone(mktz('UTC')) if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz('UTC'))
end = date_range.end.astimezone(mktz('UTC')) if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz('UTC'))
assert start.time() == time.min and end.time() == end_time_min, "Date range should fall on UTC day boundaries {}".format(date_range)
# check that the date range does not overlap
library_metadata = self._get_library_metadata(date_range)
if len(library_metadata) > 1 or (len(library_metadata) == 1 and library_metadata[0] != library_name):
raise OverlappingDataException("""There are libraries that overlap with the date range:
library: {}
overlapping libraries: {}""".format(library_name, [l.library for l in library_metadata]))
self._collection.update_one({'library_name': library_name},
{'$set': {'start': start, 'end': end}}, upsert=True) | 0.007989 |
def setModelData( self, editor, model, index ):
"""
Sets the data for the given index from the editor's value.
:param editor | <QWidget>
model | <QAbstractItemModel>
index | <QModelIndex>
"""
tree = self.parent()
querywidget = tree.parent()
factory = querywidget.factory()
item = tree.itemFromIndex(index)
value = factory.editorData(editor)
item.setValue(value) | 0.016729 |
def add_prefix(self, prefix, flags, prf):
"""Add network prefix.
Args:
prefix (str): network prefix.
flags (str): network prefix flags, please refer to the Thread documentation for details
prf (str): network preference, please refer to the Thread documentation for details
"""
self._req('prefix add %s %s %s' % (prefix, flags, prf))
time.sleep(1)
self._req('netdataregister') | 0.009112 |
def pretty_print_model(devicemodel):
"""Prints out a device model in the terminal by parsing dict."""
PRETTY_PRINT_MODEL = """Device Model ID: %(deviceModelId)s
Project ID: %(projectId)s
Device Type: %(deviceType)s"""
logging.info(PRETTY_PRINT_MODEL % devicemodel)
if 'traits' in devicemodel:
for trait in devicemodel['traits']:
logging.info(' Trait %s' % trait)
else:
logging.info('No traits')
logging.info('') | 0.002058 |
def is_scn(self):
"""Page contains Leica SCN XML in ImageDescription tag."""
if self.index > 1 or not self.description:
return False
d = self.description
return d[:14] == '<?xml version=' and d[-6:] == '</scn>' | 0.007874 |
def get_artifact(self):
"""Return the job artifact built by the parser."""
self.artifact[self.parser.name] = self.parser.get_artifact()
return self.artifact | 0.011111 |
def get_vars_dataframe(self, *varnames):
"""
Return pandas DataFrame with the value of the variables specified in `varnames`.
Can be used for task/works/flow. It's recursive!
.. example:
flow.get_vars_dataframe("ecut", "ngkpt")
work.get_vars_dataframe("acell", "usepawu")
"""
import pandas as pd
if self.is_task:
df = pd.DataFrame([{v: self.input.get(v, None) for v in varnames}], index=[self.name], columns=varnames)
df["class"] = self.__class__.__name__
return df
elif self.is_work:
frames = [task.get_vars_dataframe(*varnames) for task in self]
return pd.concat(frames)
elif self.is_flow:
frames = [work.get_vars_dataframe(*varnames) for work in self]
return pd.concat(frames)
else:
#print("Ignoring node of type: `%s`" % type(self))
return pd.DataFrame(index=[self.name]) | 0.00504 |
def subtract_months(self, months: int) -> datetime:
""" Subtracts a number of months from the current value """
self.value = self.value - relativedelta(months=months)
return self.value | 0.009615 |
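For context, relativedelta clamps the result to the last valid day of the target month. A minimal standalone sketch (assumes python-dateutil is installed):
# Month subtraction with dateutil's relativedelta, outside the class.
from datetime import datetime
from dateutil.relativedelta import relativedelta

value = datetime(2020, 3, 31)
print(value - relativedelta(months=1))  # 2020-02-29 00:00:00 (clamped to month end)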
def _init_fcp_pool(self, fcp_list, assigner_id):
"""The FCP information got from smt (zthin) looks like:
host: FCP device number: xxxx
host: Status: Active
host: NPIV world wide port number: xxxxxxxx
host: Channel path ID: xx
host: Physical world wide port number: xxxxxxxx
......
host: FCP device number: xxxx
host: Status: Active
host: NPIV world wide port number: xxxxxxxx
host: Channel path ID: xx
host: Physical world wide port number: xxxxxxxx
"""
complete_fcp_set = self._expand_fcp_list(fcp_list)
fcp_info = self._get_all_fcp_info(assigner_id)
lines_per_item = 5
num_fcps = len(fcp_info) // lines_per_item
for n in range(0, num_fcps):
fcp_init_info = fcp_info[(lines_per_item * n):(lines_per_item * (n + 1))]
fcp = FCP(fcp_init_info)
dev_no = fcp.get_dev_no()
if dev_no in complete_fcp_set:
if fcp.is_valid():
self._fcp_pool[dev_no] = fcp
else:
errmsg = ("Find an invalid FCP device with properties {"
"dev_no: %(dev_no)s, "
"NPIV_port: %(NPIV_port)s, "
"CHPID: %(CHPID)s, "
"physical_port: %(physical_port)s} !") % {
'dev_no': fcp.get_dev_no(),
'NPIV_port': fcp.get_npiv_port(),
'CHPID': fcp.get_chpid(),
'physical_port': fcp.get_physical_port()}
LOG.warning(errmsg)
else:
# normal, FCP not used by cloud connector at all
msg = "Found a fcp %s not in fcp_list" % dev_no
LOG.debug(msg) | 0.00105 |
def get_column(self, position, missing_seqs=MissingSequenceHandler.SKIP):
"""
return a column from an alignment as a dictionary indexed by seq. name.
:param position: the index to extract; these are in alignment
co-ordinates, which are one-based, so the first column
has index 1, and the final column has
index == size(self).
:param missing_seqs: how to treat sequences with no actual sequence data for
the column.
:return: dictionary where keys are sequence names and values are
nucleotides (raw strings).
"""
res = {}
for k in self.sequences:
if isinstance(self.sequences[k], UnknownSequence):
if missing_seqs is MissingSequenceHandler.TREAT_AS_ALL_GAPS:
res[k] = "-"
elif missing_seqs is MissingSequenceHandler.SKIP:
continue
else:
res[k] = self.sequences[k][position - 1]
return res | 0.005025 |
def account_products(self):
''' a method to retrieve a list of the account products
returns:
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"entries": [
{
"productId": "3000",
"productName": "Capital One 360 Savings Account"
}
]
}
}
'''
title = '%s.account_products' % self.__class__.__name__
# construct url
url = self.deposits_endpoint + 'account-products'
# send request
details = self._requests(url)
return details | 0.008578 |
def _map_sextuple_to_phenotype(
self, superterm1_id, subterm1_id, quality_id, superterm2_id,
subterm2_id, modifier):
"""
This will take the 6-part EQ-style annotation
used by ZFIN and return the ZP id.
Currently relies on an external mapping file,
but the method may be swapped out in the future
:param superterm1_id:
:param subterm1_id:
:param quality_id:
:param superterm2_id:
:param subterm2_id:
:param modifier:
:return: ZP id
"""
zp_id = None
# zfin uses free-text modifiers,
# but we need to convert them to proper PATO classes for the mapping
mod_id = self.resolve(modifier, False)
if modifier == mod_id:
LOG.warning("no mapping for pato modifier " + modifier)
key = self._make_zpkey(
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, mod_id)
mapping = self.zp_map.get(key)
if mapping is None:
if modifier == 'normal':
pass
# LOG.info("Normal phenotypes not yet supported")
else:
LOG.warning(
"Couldn't map ZP id to %s with modifier %s", "_"
.join((
superterm1_id, subterm1_id, quality_id,
superterm2_id, subterm2_id, mod_id)), modifier)
else:
zp_id = mapping['zp_id']
return zp_id | 0.00131 |
def refresh_client(self, from_dt=None, to_dt=None):
"""
Refreshes the ContactsService endpoint, ensuring that the
contacts data is up-to-date.
"""
params_contacts = dict(self.params)
params_contacts.update({
'clientVersion': '2.1',
'locale': 'en_US',
'order': 'last,first',
})
req = self.session.get(
self._contacts_refresh_url,
params=params_contacts
)
self.response = req.json()
params_refresh = dict(self.params)
params_refresh.update({
'prefToken': req.json()["prefToken"],
'syncToken': req.json()["syncToken"],
})
self.session.post(self._contacts_changeset_url, params=params_refresh)
req = self.session.get(
self._contacts_refresh_url,
params=params_contacts
)
self.response = req.json() | 0.002134 |
def noclip(args):
"""
%prog noclip bamfile
Remove clipped reads from BAM.
"""
p = OptionParser(noclip.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
noclipbam = bamfile.replace(".bam", ".noclip.bam")
cmd = "samtools view -h {} | awk -F '\t' '($6 !~ /H|S/)'".format(bamfile)
cmd += " | samtools view -@ 4 -b -o {}".format(noclipbam)
sh(cmd)
sh("samtools index {}".format(noclipbam)) | 0.001988 |
def sensors(self):
"""Return all known sensors.
:return: list of :class:`Sensor` instances.
"""
sensors = []
try:
while True:
sensor = self.lib.tdSensor()
sensors.append(Sensor(lib=self.lib, **sensor))
except TelldusError as e:
if e.error != const.TELLSTICK_ERROR_DEVICE_NOT_FOUND:
raise
return sensors | 0.00464 |
def count_reads(self, l):
"""
Count the reads at a particular location in the bam file.
Parameters
----------
l : str
A location.
"""
if self.__paired:
cmd = [self.__samtools, 'view', '-c', '-f', '3', self.__bam, l]
else:
cmd = [self.__samtools, 'view', '-c', '-F', '4', self.__bam, l]
#print(' '.join(cmd))
stdout = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
ret = int(stdout.readline().decode('utf-8').strip())
stdout.close()
return ret | 0.015528 |
def get_swagger_operation(self, context=default_context):
"""
get the swagger_schema operation representation.
"""
consumes = produces = context.contenttype_serializers.keys()
parameters = get_swagger_parameters(self.parameters, context)
responses = {
"400": Response(
{
"description": "invalid input received",
"schema": Schema(
{
"title": "FailureObject",
"type": "object",
"properties": {
"success": {"type": "boolean"},
"result": {"type": "string"},
},
"required": ["success", "result"],
}
),
}
)
}
for code, details in self.response_types.items():
responses[str(code)] = details.swagger_definition(context)
return Operation(
{
"summary": self.summary,
"description": self.description,
"consumes": consumes,
"produces": produces,
"parameters": parameters,
"responses": responses,
"operationId": self.raw_func.__name__,
"tags": self.tags,
}
) | 0.001371 |
def check_topic_model_string_format(term_dict):
'''
Parameters
----------
term_dict: dict {metadataname: [term1, term2, ....], ...}
Returns
-------
None
'''
if type(term_dict) != dict:
raise TypeError("Argument for term_dict must be a dict, keyed on strings, and contain a list of strings.")
for k, v in term_dict.items():
if type(v) != list:
raise TypeError("Values in term dict must only be lists.")
if sys.version_info[0] == 2:
if type(k) != str and type(k) != unicode:
raise TypeError("Keys in term dict must be of type str or unicode.")
for e in v:
if type(e) != str and type(e) != unicode:
raise TypeError("Values in term lists must be str or unicode.")
if sys.version_info[0] == 3:
if type(k) != str:
raise TypeError("Keys in term dict must be of type str.")
for e in v:
if type(e) != str:
raise TypeError("Values in term lists must be str.") | 0.003676 |
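A quick usage sketch of the argument shape the validator expects (the metadata names here are made up):
# Hypothetical term_dict that passes the validation above without raising.
term_dict = {
    "sports": ["football", "baseball", "goal"],
    "politics": ["election", "senate"],
}
check_topic_model_string_format(term_dict)  # returns None

# A non-list value would raise TypeError("Values in term dict must only be lists."):
# check_topic_model_string_format({"sports": "football"})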
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs) | 0.001938 |
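The helper relies on an import_class that resolves a dotted path to a class. A minimal sketch of such a resolver (an assumption for illustration, not the library's actual implementation):
# Minimal dotted-path resolver using importlib; illustrative only.
import importlib

def import_class(import_str):
    module_name, _, class_name = import_str.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# Example: instantiate collections.OrderedDict through the resolver.
od = import_class("collections.OrderedDict")()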
def build_all(self) -> BuildProcessStats:
"""
Build all the targets, very high level method
:return:
"""
stats = BuildProcessStats()
for builder in self.builders:
self.build_by_builder(
builder=builder,
stats=stats,
)
return stats | 0.005848 |
def _wait_for_files(path):
"""
Retry with backoff up to 1 second to delete files from a directory.
:param str path: The path to crawl to delete files from
:return: A list of remaining paths or None
:rtype: Optional[List[str]]
"""
timeout = 0.001
remaining = []
while timeout < 1.0:
remaining = []
if os.path.isdir(path):
L = os.listdir(path)
for target in L:
_remaining = _wait_for_files(os.path.join(path, target))
if _remaining:
remaining.extend(_remaining)
continue
try:
os.unlink(path)
except FileNotFoundError as e:
if e.errno == errno.ENOENT:
return
except (OSError, IOError, PermissionError):
time.sleep(timeout)
timeout *= 2
remaining.append(path)
else:
return
return remaining | 0.001075 |
def install(self, build_dir, install_dir=None, **kwargs):
"""This function builds the cmake install command."""
# pylint: disable=no-self-use
del kwargs
install_args = ["cmake", "--build", build_dir, "--target", "install"]
install_args.extend(self._get_build_flags())
if install_dir:
self._target_config.env["DESTDIR"] = install_dir
return [{"args": install_args}] | 0.00463 |
def sanitize_latex(string):
"""
Sanitize a string for input to LaTeX.
Replacements taken from `Stack Overflow
<http://stackoverflow.com/questions/2627135/how-do-i-sanitize-latex-input>`_
**Parameters**
string: str
**Returns**
sanitized_string: str
"""
sanitized_string = string
for old, new in _latex_replacements:
sanitized_string = sanitized_string.replace(old, new)
return sanitized_string | 0.002208 |
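The _latex_replacements table is defined elsewhere in the module. A hedged sketch of what such a table and a call could look like (illustrative pairs only; a complete table also needs backslash, '~' and '^', which require care about replacement order):
# Assumed replacement table for LaTeX-special characters; the real
# _latex_replacements may differ in order and coverage.
_latex_replacements = [
    ("&", r"\&"),
    ("%", r"\%"),
    ("$", r"\$"),
    ("#", r"\#"),
    ("_", r"\_"),
    ("{", r"\{"),
    ("}", r"\}"),
]

print(sanitize_latex("50% of cost_estimate"))  # 50\% of cost\_estimate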
def _read_credential_file(self, cfg):
"""
Implements the default (keystone) behavior.
"""
self.username = cfg.get("keystone", "username")
self.password = cfg.get("keystone", "password", raw=True)
self.tenant_id = cfg.get("keystone", "tenant_id") | 0.006826 |
def draw_salt_bridges(self,color="blue"):
"""
For each bond that has been determined to be important, a line gets drawn.
"""
self.draw_saltbridges=""
if self.saltbridges!=None:
# open the SVG group once, then append a pair of lines for every bond
self.draw_saltbridges = "<g class='SaltBridges' transform='translate("+str((self.molecule.x_dim-self.molecule.molsize1)/2)+","+str((self.molecule.y_dim-self.molecule.molsize2)/2)+")'>"
for bond in self.saltbridges.saltbridges_for_drawing:
atom = self.topology_data.universe.atoms[bond[0]-1] #zero-based index vs one-based index
residue = (atom.resname, str(atom.resid), atom.segid)
self.draw_saltbridges=self.draw_saltbridges+"<line x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_saltbridges=self.draw_saltbridges+"<line x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
self.draw_saltbridges= self.draw_saltbridges+"</g>" | 0.019858 |
def Run(self, arg):
"""Returns the client stats."""
if arg is None:
arg = rdf_client_action.GetClientStatsRequest()
proc = psutil.Process(os.getpid())
meminfo = proc.memory_info()
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
create_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(proc.create_time())
stats_collector = stats_collector_instance.Get()
response = rdf_client_stats.ClientStats(
RSS_size=meminfo.rss,
VMS_size=meminfo.vms,
memory_percent=proc.memory_percent(),
bytes_received=stats_collector.GetMetricValue(
"grr_client_received_bytes"),
bytes_sent=stats_collector.GetMetricValue("grr_client_sent_bytes"),
create_time=create_time,
boot_time=boot_time)
response.cpu_samples = self.grr_worker.stats_collector.CpuSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
response.io_samples = self.grr_worker.stats_collector.IOSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
self.Send(response) | 0.002735 |
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
# TODO: Handle log file rotation
self.trailing = True
unchanged_stats = 0
while not self.should_stop_follow:
where = self.file.tell()
line = self.file.readline()
if line:
if self.trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
self.trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
if line[-1:] == '\r' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
self.trailing = False
unchanged_stats = 0
yield line
else:
self.trailing = True
self.seek(where)
yield no_new_line
# Try to catch up rotated log file
unchanged_stats += 1
if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
where != os.stat(self.file.name).st_size:
logger.info('Reopen log file because it may have been rotated.')
self.reopen_file()
time.sleep(delay) | 0.004397 |
def original_failure(self):
"""
Return the underlying Failure object, if the result is an error.
If no result is yet available, or the result was not an error, None is
returned.
This method is useful if you want to get the original traceback for an
error result.
"""
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None | 0.003752 |
def unpack(self, unpacker):
"""
Unpacks the constant pool from an unpacker stream
"""
(count, ) = unpacker.unpack_struct(_H)
# first item is never present in the actual data buffer, but
# the count number acts like it would be.
items = [(None, None), ]
count -= 1
# Long and Double const types will "consume" an item count,
# but not data
hackpass = False
for _i in range(0, count):
if hackpass:
# previous item was a long or double
hackpass = False
items.append((None, None))
else:
item = _unpack_const_item(unpacker)
items.append(item)
# if this item was a long or double, skip the next
# counter.
if item[0] in (CONST_Long, CONST_Double):
hackpass = True
self.consts = items | 0.002081 |
def _prepend_name_prefix(self, name):
"""Return file name (ie. path) with the prefix directory prepended"""
if not self.name_prefix:
return name
base = self.name_prefix
if base[0] != '/':
base = '/' + base
if name[0] != '/':
name = '/' + name
return base + name | 0.005797 |
def _handler_swagger_ui(self, request, spec, version):
"""
---
parameters:
- name: spec
in: query
type: string
- name: version
in: query
type: integer
enum: [2,3]
"""
version = version or self._version_ui
if self._spec_url:
spec_url = self._spec_url
else:
spec_url = request.url.with_path(self['swagger:spec'].url())
proto = request.headers.get(hdrs.X_FORWARDED_PROTO)
if proto:
spec_url = spec_url.with_scheme(proto)
if isinstance(spec, str):
spec_url = spec_url.with_query(spec=spec)
elif len(self._swagger_data) == 1:
for basePath in self._swagger_data:
spec_url = spec_url.with_query(spec=basePath)
else:
spec_url = spec_url.with_query(spec='/')
spec_url = spec_url.human_repr()
return web.Response(
text=ui.rend_template(spec_url,
prefix=self._swagger_ui,
version=version),
content_type='text/html') | 0.001639 |
def main(target_device):
"""Creates an interactive terminal to the target via RTT.
The main loop opens a connection to the JLink, and then connects
to the target device. RTT is started, the number of buffers is presented,
and then two worker threads are spawned: one for read, and one for write.
The main loops sleeps until the JLink is either disconnected or the
user hits ctrl-c.
Args:
target_device (string): The target CPU to connect to.
Returns:
Always returns ``0`` or a JLinkException.
Raises:
JLinkException on error.
"""
jlink = pylink.JLink()
print("connecting to JLink...")
jlink.open()
print("connecting to %s..." % target_device)
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(target_device)
print("connected, starting RTT...")
jlink.rtt_start()
while True:
try:
num_up = jlink.rtt_get_num_up_buffers()
num_down = jlink.rtt_get_num_down_buffers()
print("RTT started, %d up bufs, %d down bufs." % (num_up, num_down))
break
except pylink.errors.JLinkRTTException:
time.sleep(0.1)
try:
thread.start_new_thread(read_rtt, (jlink,))
thread.start_new_thread(write_rtt, (jlink,))
while jlink.connected():
time.sleep(1)
print("JLink disconnected, exiting...")
except KeyboardInterrupt:
print("ctrl-c detected, exiting...")
pass | 0.001341 |
def write_to_ndef_service(self, data, *blocks):
"""Write block data to an NDEF compatible tag.
This is a convenience method to write block data to a tag that
has system code 0x12FC (NDEF). For other tags this method
simply does nothing. The *data* to write must be a string or
bytearray with length equal ``16 * len(blocks)``. All
parameters following *data* are interpreted as block numbers
to write. To actually pass a list of block numbers requires
unpacking. The following example calls would have the same
effect of writing 32 byte zeros into blocks 1 and 8.::
tag.write_to_ndef_service(32 * "\\0", 1, 8)
tag.write_to_ndef_service(32 * "\\0", *list(1, 8))
Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
"""
if self.sys == 0x12FC:
sc_list = [ServiceCode(0, 0b001001)]
bc_list = [BlockCode(n) for n in blocks]
self.write_without_encryption(sc_list, bc_list, data) | 0.001918 |
def get_method(name):
"""Return the PSD method registered with the given name.
"""
# find method
name = _format_name(name)
try:
return METHODS[name]
except KeyError as exc:
exc.args = ("no PSD method registered with name {0!r}".format(name),)
raise | 0.003378 |
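METHODS is a name-keyed registry populated elsewhere. A hedged sketch of how registration and lookup could fit together (illustrative names, not the package's actual registrations):
# Sketch of the registry the lookup above assumes; the real METHODS dict and
# _format_name live at module level.
METHODS = {}

def _format_name(name):
    return name.lower().replace("-", "_")

def register_method(func, name=None):
    METHODS[_format_name(name or func.__name__)] = func
    return func

@register_method
def median_mean(timeseries, segmentlength, **kwargs):
    """Placeholder PSD estimator used only for illustration."""
    raise NotImplementedError

print(get_method("median-mean") is median_mean)  # True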
def build_environ(scope: Scope, body: bytes) -> dict:
"""
Builds a scope and request body into a WSGI environ object.
"""
environ = {
"REQUEST_METHOD": scope["method"],
"SCRIPT_NAME": scope.get("root_path", ""),
"PATH_INFO": scope["path"],
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
"wsgi.version": (1, 0),
"wsgi.url_scheme": scope.get("scheme", "http"),
"wsgi.input": io.BytesIO(body),
"wsgi.errors": sys.stdout,
"wsgi.multithread": True,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
# Get server name and port - required in WSGI, not in ASGI
server = scope.get("server") or ("localhost", 80)
environ["SERVER_NAME"] = server[0]
environ["SERVER_PORT"] = str(server[1])  # WSGI expects SERVER_PORT as a string
# Get client IP address
if scope.get("client"):
environ["REMOTE_ADDR"] = scope["client"][0]
# Go through headers and make them into environ entries
for name, value in scope.get("headers", []):
name = name.decode("latin1")
if name == "content-length":
corrected_name = "CONTENT_LENGTH"
elif name == "content-type":
corrected_name = "CONTENT_TYPE"
else:
corrected_name = f"HTTP_{name}".upper().replace("-", "_")
# HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case
value = value.decode("latin1")
if corrected_name in environ:
value = environ[corrected_name] + "," + value
environ[corrected_name] = value
return environ | 0.001208 |
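A minimal usage sketch with a hypothetical ASGI HTTP scope (the field names follow the ASGI spec; the values are made up):
# Hypothetical scope for illustration; build_environ is the function above.
scope = {
    "method": "GET",
    "path": "/items",
    "root_path": "",
    "query_string": b"page=2",
    "http_version": "1.1",
    "scheme": "http",
    "server": ("localhost", 8000),
    "client": ("127.0.0.1", 52000),
    "headers": [(b"host", b"localhost:8000"), (b"content-type", b"text/plain")],
}
environ = build_environ(scope, b"")
print(environ["PATH_INFO"], environ["QUERY_STRING"], environ["CONTENT_TYPE"])
# /items page=2 text/plain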
def _sampler_n_samples(self, n_samples):
""" Return (sampler, n_samplers) tuples """
sampler_indices = self.rng_.choice(range(len(self.samplers)),
size=n_samples,
replace=True,
p=self.weights)
return [
(self.samplers[idx], freq)
for idx, freq in itemfreq(sampler_indices)
] | 0.004367 |
def train(self, data_iterator):
"""Train a keras model on a worker
"""
optimizer = get_optimizer(self.master_optimizer)
self.model = model_from_yaml(self.yaml, self.custom_objects)
self.model.compile(optimizer=optimizer,
loss=self.master_loss, metrics=self.master_metrics)
self.model.set_weights(self.parameters.value)
feature_iterator, label_iterator = tee(data_iterator, 2)
x_train = np.asarray([x for x, y in feature_iterator])
y_train = np.asarray([y for x, y in label_iterator])
self.model.compile(optimizer=self.master_optimizer,
loss=self.master_loss,
metrics=self.master_metrics)
weights_before_training = self.model.get_weights()
if x_train.shape[0] > self.train_config.get('batch_size'):
self.model.fit(x_train, y_train, **self.train_config)
weights_after_training = self.model.get_weights()
deltas = subtract_params(
weights_before_training, weights_after_training)
yield deltas | 0.001791 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, rup.mag, dists.rrup) +
self._get_style_of_faulting_term(C, rup.rake) +
self._get_site_scaling_term(C, sites.vs30))
stddevs = self._get_stddevs(imt,
rup.mag,
len(dists.rrup),
stddev_types)
return mean, stddevs | 0.002342 |
def track_download_request(download_url, download_title):
"""Track a download in Piwik"""
from indico_piwik.plugin import PiwikPlugin
if not download_url:
raise ValueError("download_url can't be empty")
if not download_title:
raise ValueError("download_title can't be empty")
request = PiwikRequest(server_url=PiwikPlugin.settings.get('server_api_url'),
site_id=PiwikPlugin.settings.get('site_id_events'),
api_token=PiwikPlugin.settings.get('server_token'),
query_script=PiwikPlugin.track_script)
action_url = quote(download_url)
dt = datetime.now()
request.call(idsite=request.site_id,
rec=1,
action_name=quote(download_title.encode('utf-8')),
url=action_url,
download=action_url,
h=dt.hour, m=dt.minute, s=dt.second) | 0.002139 |
def decrypt_or_cache(filename, **kwargs):
"""
Attempts to load a local version of decrypted secrets before making external api calls.
This is useful as it allows credkeep secrets to be used offline. Options for decrypt_file can be passed to this
function.
:param filename: filename of encrypted JSON file
:return: Dict containing decrypted keys
"""
clear_fname = enc_to_clear_filename(filename)
if clear_fname:
return json.load(open(clear_fname))
return decrypt_file(filename, **kwargs) | 0.005556 |
def date_created(self):
"""Date the Scopus record was created."""
date_created = self.xml.find('author-profile/date-created', ns)
try:
return (int(date_created.attrib['year']),
int(date_created.attrib['month']),
int(date_created.attrib['day']))
except AttributeError: # date_created is None
return (None, None, None) | 0.004843 |
def get_default_config_help(self):
"""
Return help text
"""
config_help = super(PassengerCollector, self).get_default_config_help()
config_help.update({
"bin": "The path to the binary",
"use_sudo": "Use sudo?",
"sudo_cmd": "Path to sudo",
"passenger_status_bin":
"The path to the binary passenger-status",
"passenger_memory_stats_bin":
"The path to the binary passenger-memory-stats",
})
return config_help | 0.00339 |
def _syntax_style_changed(self):
""" Set the style for the syntax highlighter.
"""
if self._highlighter is None:
# ignore premature calls
return
if self.syntax_style:
self._highlighter.set_style(self.syntax_style)
else:
self._highlighter.set_style_sheet(self.style_sheet) | 0.005571 |
def download_supplementary_files(self, directory='series',
download_sra=True, email=None,
sra_kwargs=None, nproc=1):
"""Download supplementary data.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
directory (:obj:`str`, optional): Directory to download the data
(in this directory function will create new directory with the
files), by default this will be named with the series
name + _Supp.
download_sra (:obj:`bool`, optional): Indicates whether to download
SRA raw data too. Defaults to True.
email (:obj:`str`, optional): E-mail that will be provided to the
Entrez. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
GSM.download_SRA method. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
Returns:
:obj:`dict`: Downloaded data for each of the GSM
"""
if sra_kwargs is None:
sra_kwargs = dict()
if directory == 'series':
dirpath = os.path.abspath(self.get_accession() + "_Supp")
utils.mkdir_p(dirpath)
else:
dirpath = os.path.abspath(directory)
utils.mkdir_p(dirpath)
downloaded_paths = dict()
if nproc == 1:
# No need to parallelize, running ordinary download in loop
downloaded_paths = dict()
for gsm in itervalues(self.gsms):
logger.info(
"Downloading SRA files for %s series\n" % gsm.name)
paths = gsm.download_supplementary_files(email=email,
download_sra=download_sra,
directory=dirpath,
sra_kwargs=sra_kwargs)
downloaded_paths[gsm.name] = paths
elif nproc > 1:
# Parallelization enabled
downloaders = list()
# Collecting params for Pool.map in a loop
for gsm in itervalues(self.gsms):
downloaders.append([
gsm,
download_sra,
email,
dirpath,
sra_kwargs])
p = Pool(nproc)
results = p.map(_supplementary_files_download_worker, downloaders)
downloaded_paths = dict(results)
else:
raise ValueError("Nproc should be non-negative: %s" % str(nproc))
return downloaded_paths | 0.001987 |
def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration'):
"""
turn:
{u'AWSAccessKeyId': [u'the_key'],
u'Action': [u'CreateDhcpOptions'],
u'DhcpConfiguration.1.Key': [u'domain-name'],
u'DhcpConfiguration.1.Value.1': [u'example.com'],
u'DhcpConfiguration.2.Key': [u'domain-name-servers'],
u'DhcpConfiguration.2.Value.1': [u'10.0.0.6'],
u'DhcpConfiguration.2.Value.2': [u'10.0.0.7'],
u'Signature': [u'uUMHYOoLM6r+sT4fhYjdNT6MHw22Wj1mafUpe0P0bY4='],
u'SignatureMethod': [u'HmacSHA256'],
u'SignatureVersion': [u'2'],
u'Timestamp': [u'2014-03-18T21:54:01Z'],
u'Version': [u'2013-10-15']}
into:
{u'domain-name': [u'example.com'], u'domain-name-servers': [u'10.0.0.6', u'10.0.0.7']}
"""
key_needle = re.compile(u'{0}.[0-9]+.Key'.format(option), re.UNICODE)
response_values = {}
for key, value in querystring.items():
if key_needle.match(key):
values = []
key_index = key.split(".")[1]
value_index = 1
while True:
value_key = u'{0}.{1}.Value.{2}'.format(
option, key_index, value_index)
if value_key in querystring:
values.extend(querystring[value_key])
else:
break
value_index += 1
response_values[value[0]] = values
return response_values | 0.002004 |
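A usage sketch with the querystring shape shown in the docstring:
# Only the DhcpConfiguration.* entries matter for the parsing above.
querystring = {
    u'DhcpConfiguration.1.Key': [u'domain-name'],
    u'DhcpConfiguration.1.Value.1': [u'example.com'],
    u'DhcpConfiguration.2.Key': [u'domain-name-servers'],
    u'DhcpConfiguration.2.Value.1': [u'10.0.0.6'],
    u'DhcpConfiguration.2.Value.2': [u'10.0.0.7'],
}
print(dhcp_configuration_from_querystring(querystring))
# {u'domain-name': [u'example.com'], u'domain-name-servers': [u'10.0.0.6', u'10.0.0.7']}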
def create(self, language, tagged_text, source_channel=values.unset):
"""
Create a new SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the new sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the new sample was captured
:returns: Newly created SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
) | 0.006024 |
def get_nameserver_detail_output_show_nameserver_nameserver_connected_via_ag(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_nameserver_detail = ET.Element("get_nameserver_detail")
config = get_nameserver_detail
output = ET.SubElement(get_nameserver_detail, "output")
show_nameserver = ET.SubElement(output, "show-nameserver")
nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
nameserver_portid_key.text = kwargs.pop('nameserver_portid')
nameserver_connected_via_ag = ET.SubElement(show_nameserver, "nameserver-connected-via-ag")
nameserver_connected_via_ag.text = kwargs.pop('nameserver_connected_via_ag')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.007134 |
def guess_datetime_format(lines: Iterable[str], threshold: int=5) \
-> Tuple[Optional[str], Optional[bool]]:
'''Guess the order of year, month, day, and whether 12- or 24-hour time is used.
Returns:
tuple: First item is either str ``ymd``, ``dmy``, ``mdy``
or ``None``.
Second item is either True for 12-hour time or False for 24-hour time
or None.
'''
time_12_score = 0
time_24_score = 0
date_ymd_score = 0
date_dmy_score = 0
date_mdy_score = 0
for line in lines:
line = unicodedata.normalize('NFKD', line).lower()
if DAY_PERIOD_PATTERN.search(line):
time_12_score += 1
else:
time_24_score += 1
if ISO_8601_DATE_PATTERN.search(line):
date_ymd_score += 1
if MMM_DD_YY_PATTERN.search(line):
date_mdy_score += 1
match = NN_NN_NNNN_PATTERN.search(line)
if match:
num_1 = int(match.group(1))
num_2 = int(match.group(2))
if num_1 > 12:
date_dmy_score += 1
elif num_2 > 12:
date_mdy_score += 1
time_score = time_12_score + time_24_score
date_score = date_ymd_score + date_dmy_score + date_mdy_score
if time_score >= threshold and date_score >= threshold:
break
if date_ymd_score or date_dmy_score or date_mdy_score:
top = max([
(date_ymd_score, 'ymd'),
(date_dmy_score, 'dmy'),
(date_mdy_score, 'mdy'),
],
key=lambda item: item[0]
)
date_format = top[1]
else:
date_format = None
if time_12_score or time_24_score:
day_period = True if time_12_score > time_24_score else False
else:
day_period = None
return date_format, day_period | 0.001629 |
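A brief usage sketch follows; it assumes the module-level regexes (DAY_PERIOD_PATTERN, ISO_8601_DATE_PATTERN, MMM_DD_YY_PATTERN, NN_NN_NNNN_PATTERN) behave as their names suggest, and the sample lines are made up for illustration.
# Hypothetical access-log lines mixing ISO 8601 dates with am/pm times.
sample_lines = [
    "2014-03-18 09:15 pm GET /index.html",
    "2014-03-19 10:02 am GET /about.html",
]
date_format, day_period = guess_datetime_format(sample_lines, threshold=5)
# Expected under those assumptions: date_format == 'ymd' (the ISO dates dominate)
# and day_period is True (the am/pm markers score as 12-hour time).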
def launcher(deploy_mode, args, working_dir=".", cleanup=True):
"""Initializes arguments and starts up pyspark with the correct deploy mode and environment.
Parameters
----------
deploy_mode : {"client", "cluster"}
args : list
Arguments to pass onwards to spark submit.
working_dir : str, optional
Path to working directory to use for creating conda environments. Defaults to the current working directory.
cleanup : bool, optional
Clean up extracted / generated files. This defaults to true since conda environments can be rather large.
Returns
-------
This call will spawn a child process and block until that is complete.
"""
spark_args = args.copy()
# Scan through the arguments to find --conda
# TODO: make this optional, if not specified ignore all the python stuff
# Is this double dash in front of conda env correct?
i = spark_args.index("--conda-env")
# pop off the '--conda-env' portion and just drop it on the floor
spark_args.pop(i)
# Now pop off the actual conda env var passed to the launcher
conda_env = spark_args.pop(i)
cleanup_functions = []
# What else could this possibly be other than a string here?
assert isinstance(conda_env, str)
func_kwargs = {'conda_env': conda_env,
'deploy_mode': deploy_mode,
'working_dir': working_dir,
'cleanup_functions': cleanup_functions}
if conda_env.startswith("hdfs:/"):
# "hadoop fs -ls" can return URLs with only a single "/" after the "hdfs:" scheme
env_name, env_dir, env_archive = _conda_from_hdfs(**func_kwargs)
elif conda_env.endswith(".zip"):
# We have a precreated conda environment around.
        # Unpack the third return value as env_archive, matching the other branches,
        # so the func_kwargs constructed below does not reference an undefined name.
        env_name, env_dir, env_archive = _conda_from_zip(**func_kwargs)
elif conda_env.endswith(".yaml"):
# The case where we have to CREATE the environment ourselves
env_name, env_dir, env_archive = _conda_from_yaml(**func_kwargs)
else:
raise NotImplementedError("Can only run launcher if your conda env is on hdfs (starts "
"with 'hdfs:/', is already a zip (ends with '.zip'), or is "
"coming from a yaml specification (ends with '.yaml' and "
"conforms to the conda environment.yaml spec)")
del func_kwargs
func_kwargs = dict(env_dir=env_dir, env_name=env_name, env_archive=env_archive, args=spark_args)
funcs = {'client': run_pyspark_yarn_client, 'cluster': run_pyspark_yarn_cluster}
try:
funcs[deploy_mode](**func_kwargs)
finally:
if not cleanup:
return
# iterate over and call all cleanup functions
for function in cleanup_functions:
try:
function()
except:
log.exception("Cleanup function %s failed", function) | 0.004084 |
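A hedged invocation sketch; the HDFS path, script name, and extra arguments are made up, and the helper functions referenced above are assumed to live in the same module.
# Launch pyspark in client mode with a pre-built conda environment stored on HDFS.
# The '--conda-env' pair is consumed here; everything else is handed to spark-submit.
launcher(
    "client",
    ["--conda-env", "hdfs:///user/me/envs/analysis_env.zip",
     "my_job.py", "--date", "2020-01-01"],
    working_dir="/tmp/conda_envs",
    cleanup=True,
)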
def connect(cls, region, session=None, access_key=None, secret_key=None,
host=None, port=80, is_secure=True, **kwargs):
"""
Connect to an AWS region.
Parameters
----------
region : str
Name of an AWS region
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
host : str, optional
Address of the host. Use this to connect to a local instance.
port : int, optional
Connect to the host on this port (default 80)
is_secure : bool, optional
Enforce https connection (default True)
**kwargs : dict
Keyword arguments to pass to the constructor
"""
if session is None:
session = botocore.session.get_session()
if access_key is not None:
session.set_credentials(access_key, secret_key)
url = None
if host is not None:
protocol = 'https' if is_secure else 'http'
url = "%s://%s:%d" % (protocol, host, port)
client = session.create_client('dynamodb', region, endpoint_url=url,
use_ssl=is_secure)
return cls(client, **kwargs) | 0.001989 |
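A usage sketch assuming this classmethod lives on a DynamoDB connection wrapper (called DynamoDBConnection here purely for illustration); the endpoint shown is the usual DynamoDB Local setup.
# Connect to a local DynamoDB instance over plain HTTP on port 8000.
conn = DynamoDBConnection.connect(
    "us-east-1",
    access_key="anything",
    secret_key="anything",
    host="localhost",
    port=8000,
    is_secure=False,
)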
def evaluate(data):
"""Provide evaluations for multiple callers split by structural variant type.
"""
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "validate"))
truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
if truth_sets and data.get("sv"):
if isinstance(truth_sets, dict):
val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
summary_plots = _plot_evaluation(df_csv)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
else:
assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets
val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data)
title = "%s structural variants" % dd.get_sample_name(data)
summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None}
return data | 0.007647 |
def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL):
"""Rotational constants for a given molecular system.
Calculates the rotational constants for the provided system with numerical
    value given in the units provided in `units`. The orthonormality tolerance
    `on_tol` is passed through to the :func:`principals` function.
If the system is linear or a single atom, the effectively-zero principal
moments of inertia will be assigned values of
:data:`opan.const.PRM.ZERO_MOMENT_TOL`
before transformation into the appropriate rotational constant units.
The moments of inertia are always sorted in increasing order as
:math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants
calculated from these will thus always be in **decreasing** order
as :math:`B_A \\geq B_B \\geq B_C`, retaining the
ordering and association with the three principal ``axes[:,i]`` generated
by :func:`principals`.
Parameters
----------
geom
length-3N |npfloat_| --
Coordinates of the atoms
masses
length-N OR length-3N |npfloat_| --
Atomic masses of the atoms. Length-3N option is to allow calculation of
a per-coordinate perturbed value.
units
:class:`~opan.const.EnumUnitsRotConst`, optional --
Enum value indicating the desired units of the output rotational
constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA`
:math:`\\left(1\\over \\mathrm{uB^2}\\right)`
on_tol
|npfloat_|, optional --
Tolerance for deviation from unity/zero for principal axis dot
products, within which axes are considered orthonormal. Default is
:data:`opan.const.DEF.ORTHONORM_TOL`
Returns
-------
rc
length-3 |npfloat_| --
Vector of rotational constants in the indicated units
"""
# Imports
import numpy as np
from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS
# Ensure units are valid
if not units in EURC:
raise ValueError("'{0}' is not a valid units value".format(units))
## end if
# Retrieve the moments, axes and top type. Geom and masses are proofed
# internally in this call.
mom, ax, top = principals(geom, masses, on_tol)
# Check for special cases
if top == ETT.ATOM:
# All moments are zero; set to zero-moment threshold
mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3)
elif top == ETT.LINEAR:
# First moment is zero; set to zero-moment threshold
mom[0] = PRM.ZERO_MOMENT_TOL
## end if
# Calculate the values in the indicated units
if units == EURC.INV_INERTIA: # 1/(amu*B^2)
rc = 1.0 / (2.0 * mom)
elif units == EURC.ANGFREQ_ATOMIC: # 1/Ta
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU)
elif units == EURC.ANGFREQ_SECS: # 1/s
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA
elif units == EURC.CYCFREQ_ATOMIC: # cyc/Ta
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU)
elif units == EURC.CYCFREQ_HZ: # cyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \
PHYS.SEC_PER_TA
elif units == EURC.CYCFREQ_MHZ: # Mcyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \
PHYS.SEC_PER_TA / 1.0e6
elif units == EURC.WAVENUM_ATOMIC: # cyc/B
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \
(8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED)
elif units == EURC.WAVENUM_CM: # cyc/cm
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \
(8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) * 1.0e8
else: # pragma: no cover -- Valid units; not implemented
raise NotImplementedError("Units conversion not yet implemented.")
## end if
# Return the result
return rc | 0.001702 |
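In hedged summary, the unit branches above implement the standard relations between a principal moment of inertia I and its rotational constant; the amu/bohr/second conversion factors (ME_PER_AMU, SEC_PER_TA, ANG_PER_BOHR) are omitted here for clarity.
B_{\mathrm{inv.\ inertia}} = \frac{1}{2I}, \qquad
B_{\mathrm{ang.\ freq.}} = \frac{\hbar}{2I}, \qquad
B_{\mathrm{cyc.\ freq.}} = \frac{\hbar}{4\pi I} = \frac{h}{8\pi^{2} I}, \qquad
\tilde{B}_{\mathrm{wavenumber}} = \frac{h}{8\pi^{2} c I}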
def restrict(self, restriction):
"""
In-place restriction. Restricts the result to a specified subset of the input.
rel.restrict(restriction) is equivalent to rel = rel & restriction or rel &= restriction
rel.restrict(Not(restriction)) is equivalent to rel = rel - restriction or rel -= restriction
The primary key of the result is unaffected.
Successive restrictions are combined as logical AND: r & a & b is equivalent to r & AndList((a, b))
        Any QueryExpression, collection, or sequence other than an AndList is treated as an OrList
(logical disjunction of conditions)
Inverse restriction is accomplished by either using the subtraction operator or the Not class.
        The expressions in each row are equivalent:
rel & True rel
rel & False the empty entity set
rel & 'TRUE' rel
rel & 'FALSE' the empty entity set
rel - cond rel & Not(cond)
rel - 'TRUE' rel & False
rel - 'FALSE' rel
rel & AndList((cond1,cond2)) rel & cond1 & cond2
rel & AndList() rel
rel & [cond1, cond2] rel & OrList((cond1, cond2))
rel & [] rel & False
rel & None rel & False
rel & any_empty_entity_set rel & False
rel - AndList((cond1,cond2)) rel & [Not(cond1), Not(cond2)]
rel - [cond1, cond2] rel & Not(cond1) & Not(cond2)
rel - AndList() rel & False
rel - [] rel
rel - None rel
rel - any_empty_entity_set rel
When arg is another QueryExpression, the restriction rel & arg restricts rel to elements that match at least
one element in arg (hence arg is treated as an OrList).
Conversely, rel - arg restricts rel to elements that do not match any elements in arg.
Two elements match when their common attributes have equal values or when they have no common attributes.
All shared attributes must be in the primary key of either rel or arg or both or an error will be raised.
QueryExpression.restrict is the only access point that modifies restrictions. All other operators must
ultimately call restrict()
:param restriction: a sequence or an array (treated as OR list), another QueryExpression, an SQL condition
string, or an AndList.
"""
assert is_true(restriction) or not self.heading.expressions or isinstance(self, GroupBy), \
"Cannot restrict a projection with renamed attributes in place."
self.restriction.append(restriction)
return self | 0.00509 |
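A hedged DataJoint-style sketch of the operator forms that ultimately call restrict(); Session is a hypothetical table and the condition strings are illustrative.
# Restriction keeps matching rows; successive restrictions combine as logical AND.
long_sessions = Session & 'duration > 30'
# A list (or any collection other than an AndList) is treated as an OR-list.
either_subject = Session & ['subject_id = 1', 'subject_id = 2']
# Subtraction is inverse restriction: rows matching none of the listed conditions.
neither_subject = Session - ['subject_id = 1', 'subject_id = 2']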
def basic_set_of_users_exists_in_the_database(context):
"""
:type context: behave.runner.Context
"""
user_model = get_user_model()
user_model.objects.create(
username='administrator',
is_staff=True
)
user_model.objects.create(
username='allowed_user'
)
user_model.objects.create(
username='unallowed_user'
) | 0.002545 |
async def mount(self, device):
"""
Mount the device if not already mounted.
:param device: device object, block device path or mount path
:returns: whether the device is mounted.
"""
device = self._find_device(device)
if not self.is_handleable(device) or not device.is_filesystem:
self._log.warn(_('not mounting {0}: unhandled device', device))
return False
if device.is_mounted:
self._log.info(_('not mounting {0}: already mounted', device))
return True
options = match_config(self._config, device, 'options', None)
kwargs = dict(options=options)
self._log.debug(_('mounting {0} with {1}', device, kwargs))
self._check_device_before_mount(device)
mount_path = await device.mount(**kwargs)
self._log.info(_('mounted {0} on {1}', device, mount_path))
return True | 0.002151 |
def add_router_to_hosting_device(self, context, hosting_device_id,
router_id):
"""Add a (non-hosted) router to a hosting device."""
e_context = context.elevated()
r_hd_binding_db = self._get_router_binding_info(e_context, router_id)
if r_hd_binding_db.hosting_device_id:
if r_hd_binding_db.hosting_device_id == hosting_device_id:
return
raise routertypeawarescheduler.RouterHostedByHostingDevice(
router_id=router_id, hosting_device_id=hosting_device_id)
rt_info = self.validate_hosting_device_router_combination(
context, r_hd_binding_db, hosting_device_id)
result = self.schedule_router_on_hosting_device(
e_context, r_hd_binding_db, hosting_device_id,
rt_info['slot_need'])
if result:
# refresh so that we get latest contents from DB
e_context.session.expire(r_hd_binding_db)
router = self.get_router(e_context, router_id)
self.add_type_and_hosting_device_info(
e_context, router, r_hd_binding_db, schedule=False)
l3_cfg_notifier = self.agent_notifiers.get(AGENT_TYPE_L3_CFG)
if l3_cfg_notifier:
l3_cfg_notifier.router_added_to_hosting_device(context, router)
else:
raise routertypeawarescheduler.RouterSchedulingFailed(
router_id=router_id, hosting_device_id=hosting_device_id) | 0.001992 |
def get_current_index(self):
""" Return currently selected index (or -1) """
        # Need to convert to int; currently the API returns a tuple of strings
curSel = self.__lb.curselection()
if curSel and len(curSel) > 0:
return int(curSel[0])
else:
return -1 | 0.006452 |
def filter(cls, parent=None, **filters):
"""
Gets all resources of the given type and parent (if provided) which match the given filters.
This will trigger an api GET request.
:param parent ResourceBase: the parent of the resource - used for nesting the request url, optional
:param **filters: any number of keyword arguments to filter by, e.g name='example name'
:returns: a list of matching resources
"""
data = cls._process_filter_request(parent, **filters)
return cls._load_resources(data) | 0.00885 |
def read(self):
"""Read a Response, do some validation, and return it."""
if FLAGS.sc2_verbose_protocol:
self._log(" Reading response ".center(60, "-"))
start = time.time()
response = self._read()
if FLAGS.sc2_verbose_protocol:
self._log(" %0.1f msec\n" % (1000 * (time.time() - start)))
self._log_packet(response)
if not response.HasField("status"):
raise ProtocolError("Got an incomplete response without a status.")
prev_status = self._status
self._status = Status(response.status) # pytype: disable=not-callable
if response.error:
err_str = ("Error in RPC response (likely a bug). "
"Prev status: %s, new status: %s, error:\n%s" % (
prev_status, self._status, "\n".join(response.error)))
logging.error(err_str)
raise ProtocolError(err_str)
return response | 0.010204 |
def get_cousins_treepos(self, treepos):
"""Given a treeposition, return the treeposition of its siblings."""
cousins_pos = []
mother_pos = self.get_parent_treepos(treepos)
if mother_pos is not None:
aunts_pos = self.get_siblings_treepos(mother_pos)
for aunt_pos in aunts_pos:
cousins_pos.extend( self.get_children_treepos(aunt_pos) )
return cousins_pos | 0.009238 |
def add_image(self, image_path, annotations):
"""Adds an image and its bounding boxes to the current list of files
The bounding boxes are automatically estimated based on the given annotations.
**Parameters:**
``image_path`` : str
The file name of the image, including its full path
``annotations`` : [dict]
A list of annotations, i.e., where each annotation can be anything that :py:func:`bounding_box_from_annotation` can handle; this list can be empty, in case the image does not contain any faces
"""
self.image_paths.append(image_path)
self.bounding_boxes.append([bounding_box_from_annotation(**a) for a in annotations]) | 0.005944 |
def get_file(self):
"""
Load data into a file and return file path.
:return: path to file as string
"""
content = self._load()
if not content:
return None
filename = "temporary_file.bin"
with open(filename, "wb") as file_name:
file_name.write(content)
return filename | 0.005495 |
def redundancy_output_rd_mesg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
redundancy = ET.Element("redundancy")
config = redundancy
output = ET.SubElement(redundancy, "output")
rd_mesg = ET.SubElement(output, "rd_mesg")
rd_mesg.text = kwargs.pop('rd_mesg')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004535 |
def parse_args(argv):
"""
Use Argparse to parse command-line arguments.
:param argv: list of arguments to parse (``sys.argv[1:]``)
:type argv: :std:term:`list`
:return: parsed arguments
:rtype: :py:class:`argparse.Namespace`
"""
p = argparse.ArgumentParser(
description='webhook2lambda2sqs - Generate code and manage '
'infrastructure for receiving webhooks with AWS API '
'Gateway and pushing to SQS via Lambda - <%s>' % PROJECT_URL
)
p.add_argument('-c', '--config', dest='config', type=str,
action='store', default='config.json',
help='path to config.json (default: ./config.json)')
p.add_argument('-v', '--verbose', dest='verbose', action='count',
default=0,
help='verbose output. specify twice for debug-level output.')
p.add_argument('-V', '--version', action='version',
version='webhook2lambda2sqs v%s <%s>' % (
VERSION, PROJECT_URL
))
p.add_argument('-T', '--tf-version', dest='tf_ver', action='store',
type=str, default='0.9.0',
help='terraform version to generate configurations for')
subparsers = p.add_subparsers(title='Action (Subcommand)', dest='action',
metavar='ACTION', description='Action to '
'perform; each action may take further '
'parameters. Use ACTION -h for subcommand-'
'specific options and arguments.')
subparsers.add_parser(
'generate', help='generate lambda function and terraform configs in ./'
)
tf_parsers = [
('genapply', 'generate function and terraform configs in ./, then run '
'terraform apply'),
('plan', 'run terraform plan to show changes which will be made'),
('apply', 'run terraform apply to apply changes/create infrastructure'),
('destroy',
'run terraform destroy to completely destroy infrastructure')
]
tf_p_objs = {}
for cname, chelp in tf_parsers:
tf_p_objs[cname] = subparsers.add_parser(cname, help=chelp)
tf_p_objs[cname].add_argument('-t', '--terraform-path', dest='tf_path',
action='store', default='terraform',
type=str, help='path to terraform '
'binary, if not in PATH')
tf_p_objs[cname].add_argument('-S', '--no-stream-tf', dest='stream_tf',
action='store_false', default=True,
help='DO NOT stream Terraform output to '
'STDOUT (combined) in realtime')
apilogparser = subparsers.add_parser('apilogs', help='show last 10 '
'CloudWatch Logs entries for the '
'API Gateway')
apilogparser.add_argument('-c', '--count', dest='log_count', type=int,
                              default=10, help='number of log entries to show '
                                                '(default 10)')
logparser = subparsers.add_parser('logs', help='show last 10 CloudWatch '
'Logs entries for the function')
logparser.add_argument('-c', '--count', dest='log_count', type=int,
                           default=10, help='number of log entries to show '
                                             '(default 10)')
queueparser = subparsers.add_parser('queuepeek', help='show messages from '
'one or all of the SQS queues')
queueparser.add_argument('-n', '--name', type=str, dest='queue_name',
default=None, help='queue name to read (defaults '
'to None to read all)')
queueparser.add_argument('-d', '--delete', action='store_true',
dest='queue_delete', default=False,
help='delete messages after reading')
queueparser.add_argument('-c', '--count', dest='msg_count', type=int,
default=10, help='number of messages to read from '
'each queue (default 10)')
testparser = subparsers.add_parser('test', help='send test message to '
'one or more endpoints')
testparser.add_argument('-t', '--terraform-path', dest='tf_path',
action='store', default='terraform',
type=str, help='path to terraform '
'binary, if not in PATH')
testparser.add_argument('-n', '--endpoint-name', dest='endpoint_name',
type=str, default=None,
help='endpoint name (default: None, to send to '
'all endpoints)')
subparsers.add_parser(
'example-config', help='write example config to STDOUT and description '
'of it to STDERR, then exit'
)
args = p.parse_args(argv)
if args.action is None:
# for py3, which doesn't raise on this
sys.stderr.write("ERROR: too few arguments\n")
raise SystemExit(2)
return args | 0.001091 |
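An illustrative call showing how a typical command line is parsed; the paths are made up.
# Equivalent to: webhook2lambda2sqs -c /etc/w2l2s/config.json genapply -t /usr/local/bin/terraform
args = parse_args(['-c', '/etc/w2l2s/config.json',
                   'genapply', '-t', '/usr/local/bin/terraform'])
# Under those arguments: args.action == 'genapply', args.config == '/etc/w2l2s/config.json',
# args.tf_path == '/usr/local/bin/terraform', and args.stream_tf is True (the default).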
def get_model_agents(self):
"""Return a list of all Agents from all Statements.
Returns
-------
agents : list[indra.statements.Agent]
A list of Agents that are in the model.
"""
model_stmts = self.get_statements()
agents = []
for stmt in model_stmts:
for a in stmt.agent_list():
if a is not None:
agents.append(a)
return agents | 0.004357 |
async def fetch_room(self, room_id):
"""Lookup details for a given room id"""
url = "https://production.plum.technology/v2/getRoom"
data = {"rid": room_id}
return await self.__post(url, data) | 0.008969 |
def get_bgp_neighbors(self):
def generate_vrf_query(vrf_name):
"""
Helper to provide XML-query for the VRF-type we're interested in.
"""
if vrf_name == "global":
rpc_command = '<Get><Operational><BGP><InstanceTable><Instance><Naming>\
<InstanceName>default</InstanceName></Naming><InstanceActive><DefaultVRF>\
<GlobalProcessInfo></GlobalProcessInfo><NeighborTable></NeighborTable></DefaultVRF>\
</InstanceActive></Instance></InstanceTable></BGP></Operational></Get>'
else:
rpc_command = '<Get><Operational><BGP><InstanceTable><Instance><Naming>\
<InstanceName>default</InstanceName></Naming><InstanceActive><VRFTable><VRF>\
<Naming>{vrf_name}</Naming><GlobalProcessInfo></GlobalProcessInfo><NeighborTable>\
</NeighborTable></VRF></VRFTable></InstanceActive></Instance></InstanceTable>\
</BGP></Operational></Get>'.format(vrf_name=vrf_name)
return rpc_command
"""
Initial run to figure out what VRF's are available
Decided to get this one from Configured-section
because bulk-getting all instance-data to do the same could get ridiculously heavy
Assuming we're always interested in the DefaultVRF
"""
active_vrfs = ["global"]
rpc_command = '<Get><Operational><BGP><ConfigInstanceTable><ConfigInstance><Naming>\
<InstanceName>default</InstanceName></Naming><ConfigInstanceVRFTable>\
</ConfigInstanceVRFTable></ConfigInstance></ConfigInstanceTable></BGP></Operational></Get>'
result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command))
for node in result_tree.xpath('.//ConfigVRF'):
active_vrfs.append(napalm_base.helpers.find_txt(node, 'Naming/VRFName'))
result = {}
for vrf in active_vrfs:
rpc_command = generate_vrf_query(vrf)
result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command))
this_vrf = {}
this_vrf['peers'] = {}
if vrf == "global":
this_vrf['router_id'] = napalm_base.helpers.convert(
text_type, napalm_base.helpers.find_txt(result_tree,
'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/DefaultVRF\
/GlobalProcessInfo/VRF/RouterID'))
else:
this_vrf['router_id'] = napalm_base.helpers.convert(
text_type, napalm_base.helpers.find_txt(result_tree,
'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/VRFTable/VRF\
/GlobalProcessInfo/VRF/RouterID'))
neighbors = {}
for neighbor in result_tree.xpath('.//Neighbor'):
this_neighbor = {}
this_neighbor['local_as'] = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(neighbor, 'LocalAS'))
this_neighbor['remote_as'] = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(neighbor, 'RemoteAS'))
this_neighbor['remote_id'] = napalm_base.helpers.convert(
text_type, napalm_base.helpers.find_txt(neighbor, 'RouterID'))
                try:
                    this_neighbor['description'] = napalm_base.helpers.convert(
                        text_type, napalm_base.helpers.find_txt(neighbor, 'Description'))
                except AttributeError:
                    this_neighbor['description'] = u''
                # ConnectionAdminStatus is a string; compare with == (not 'is') and
                # assign is_enabled exactly once instead of in three conflicting places.
                this_neighbor['is_enabled'] = (
                    napalm_base.helpers.find_txt(neighbor, 'ConnectionAdminStatus') == "1")
if str(napalm_base.helpers.find_txt(neighbor, 'ConnectionState')) == "BGP_ST_ESTAB":
this_neighbor['is_up'] = True
this_neighbor['uptime'] = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(neighbor, 'ConnectionEstablishedTime'))
else:
this_neighbor['is_up'] = False
this_neighbor['uptime'] = -1
this_neighbor['address_family'] = {}
if napalm_base.helpers.find_txt(neighbor,
'ConnectionRemoteAddress/AFI') == "IPv4":
this_afi = "ipv4"
elif napalm_base.helpers.find_txt(neighbor,
'ConnectionRemoteAddress/AFI') == "IPv6":
this_afi = "ipv6"
else:
this_afi = napalm_base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/AFI')
this_neighbor['address_family'][this_afi] = {}
try:
this_neighbor['address_family'][this_afi]["received_prefixes"] = \
napalm_base.helpers.convert(int,
napalm_base.helpers.find_txt(
neighbor, 'AFData/Entry/PrefixesAccepted'), 0) + \
napalm_base.helpers.convert(int,
napalm_base.helpers.find_txt(
neighbor, 'AFData/Entry/PrefixesDenied'), 0)
this_neighbor['address_family'][this_afi]["accepted_prefixes"] = \
napalm_base.helpers.convert(int,
napalm_base.helpers.find_txt(
neighbor, 'AFData/Entry/PrefixesAccepted'), 0)
this_neighbor['address_family'][this_afi]["sent_prefixes"] = \
napalm_base.helpers.convert(int,
napalm_base.helpers.find_txt(
neighbor, 'AFData/Entry/PrefixesAdvertised'), 0)
except AttributeError:
this_neighbor['address_family'][this_afi]["received_prefixes"] = -1
this_neighbor['address_family'][this_afi]["accepted_prefixes"] = -1
this_neighbor['address_family'][this_afi]["sent_prefixes"] = -1
neighbor_ip = napalm_base.helpers.ip(
napalm_base.helpers.find_txt(
neighbor, 'Naming/NeighborAddress/IPV4Address') or
napalm_base.helpers.find_txt(
neighbor, 'Naming/NeighborAddress/IPV6Address')
)
neighbors[neighbor_ip] = this_neighbor
this_vrf['peers'] = neighbors
result[vrf] = this_vrf
return result | 0.004847 |
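A hedged sketch of the dictionary shape this method builds; the VRF name, peer address, and counters are illustrative values only.
example_result = {
    'global': {
        'router_id': '10.0.0.1',
        'peers': {
            '192.0.2.2': {
                'local_as': 65001,
                'remote_as': 65002,
                'remote_id': '192.0.2.2',
                'description': 'upstream peer',
                'is_enabled': True,
                'is_up': True,
                'uptime': 123456,
                'address_family': {
                    'ipv4': {
                        'received_prefixes': 1000,
                        'accepted_prefixes': 990,
                        'sent_prefixes': 10,
                    },
                },
            },
        },
    },
}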
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change) | 0.003759 |
def flatten(dic, keep_iter=False, position=None):
"""
Returns a flattened dictionary from a dictionary of nested dictionaries and lists.
`keep_iter` will treat iterables as valid values, while also flattening them.
"""
child = {}
if not dic:
return {}
for k, v in get_iter(dic):
if isstr(k):
k = k.replace('.', '_')
if position:
item_position = '%s.%s' % (position, k)
else:
item_position = '%s' % k
if is_iter(v):
child.update(flatten(dic[k], keep_iter, item_position))
if keep_iter:
child[item_position] = v
else:
child[item_position] = v
return child | 0.004144 |
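A small sketch of the behaviour for plain nested dictionaries, assuming the helpers (get_iter, is_iter, isstr) behave as their names suggest.
nested = {'server': {'host': 'localhost', 'port': 8080}, 'debug': True}
flat = flatten(nested)
# Expected under those assumptions: nested keys are joined with '.':
# {'server.host': 'localhost', 'server.port': 8080, 'debug': True}
# Dots inside a key are replaced with underscores, so {'a.b': 1} flattens to {'a_b': 1}.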
def Parse(self, raw_data):
"""Take the results and yield results that passed through the filters.
The output of each filter is used as the input for successive filters.
Args:
raw_data: An iterable series of rdf values.
Returns:
A list of rdf values that matched all filters.
"""
self.results = raw_data
for f in self.filters:
self.results = f.Parse(self.results)
return self.results | 0.004598 |