text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def _reset_py_display() -> None:
"""
Resets the dynamic objects in the sys module that the py and ipy consoles fight over.
When a Python console starts it adopts certain display settings if they've already been set.
If an ipy console has previously been run, then py uses its settings and ends up looking
like an ipy console in terms of prompt and exception text. This method forces the Python
console to create its own display settings since they won't exist.
IPython does not have this problem since it always overwrites the display settings when it
is run. Therefore this method only needs to be called before creating a Python console.
"""
# Delete any prompts that have been set
attributes = ['ps1', 'ps2', 'ps3']
for cur_attr in attributes:
try:
del sys.__dict__[cur_attr]
except KeyError:
pass
# Reset functions
sys.displayhook = sys.__displayhook__
sys.excepthook = sys.__excepthook__ | 0.007477 |
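A minimal usage sketch of the reset above, assuming it is available as a plain function in the same module; it sets a custom prompt the way an ipy session might, then confirms the reset removes it and restores the default display hook.

```python
import sys

# Simulate an ipy-style session that customised the prompt and display hook.
sys.ps1 = 'ipy> '
sys.displayhook = print

_reset_py_display()

print(hasattr(sys, 'ps1'))                     # -> False: prompt removed
print(sys.displayhook is sys.__displayhook__)  # -> True: hook restored
```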
def read(self, table):
"""..."""
# Table exists
if table in self.by_id:
return self.by_id[table].values()
if table in self.cache:
return self.cache[table]
# Read table
cls = self.FACTORIES[table]
key = cls.KEY
if key:
if table not in self.by_id:
self.by_id[table] = {}
t = self.by_id[table]
for item in self.iterread(table):
t[item.get(key)] = item
return t.values()
if table not in self.cache:
self.cache[table] = []
t = self.cache[table]
for item in self.iterread(table):
t.append(item)
return t | 0.014778 |
def reset( self ):
"""
Resets the colors to the default settings.
"""
dataSet = self.dataSet()
if ( not dataSet ):
dataSet = XScheme()
dataSet.reset() | 0.031963 |
def receive(self, max_batch_size=None, timeout=None):
"""
Receive events from the EventHub.
:param max_batch_size: Receive a batch of events. Batch size will
be up to the maximum specified, but will return as soon as the service
returns no new events. If combined with a timeout and no events are
retrieved before the timeout expires, the result will be empty. If no batch
size is supplied, the prefetch size will be the maximum.
:type max_batch_size: int
:rtype: list[~azure.eventhub.common.EventData]
"""
if self.error:
raise self.error
if not self.running:
raise ValueError("Unable to receive until client has been started.")
data_batch = []
try:
timeout_ms = 1000 * timeout if timeout else 0
message_batch = self._handler.receive_message_batch(
max_batch_size=max_batch_size,
timeout=timeout_ms)
for message in message_batch:
event_data = EventData(message=message)
self.offset = event_data.offset
data_batch.append(event_data)
return data_batch
except (errors.TokenExpired, errors.AuthenticationException):
log.info("Receiver disconnected due to token error. Attempting reconnect.")
self.reconnect()
return data_batch
except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
if shutdown.action.retry and self.auto_reconnect:
log.info("Receiver detached. Attempting reconnect.")
self.reconnect()
return data_batch
log.info("Receiver detached. Shutting down.")
error = EventHubError(str(shutdown), shutdown)
self.close(exception=error)
raise error
except errors.MessageHandlerError as shutdown:
if self.auto_reconnect:
log.info("Receiver detached. Attempting reconnect.")
self.reconnect()
return data_batch
log.info("Receiver detached. Shutting down.")
error = EventHubError(str(shutdown), shutdown)
self.close(exception=error)
raise error
except Exception as e:
log.info("Unexpected error occurred (%r). Shutting down.", e)
error = EventHubError("Receive failed: {}".format(e))
self.close(exception=error)
raise error | 0.001592 |
def create_image_summary(name, val):
"""
Args:
name(str):
val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3.
Can be either float or uint8. Range has to be [0,255].
Returns:
tf.Summary:
"""
assert isinstance(name, six.string_types), type(name)
n, h, w, c = val.shape
val = val.astype('uint8')
s = tf.Summary()
imparams = [cv2.IMWRITE_PNG_COMPRESSION, 9]
for k in range(n):
arr = val[k]
# CV2 will only write correctly in BGR channel order
if c == 3:
arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
elif c == 4:
arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
tag = name if n == 1 else '{}/{}'.format(name, k)
retval, img_str = cv2.imencode('.png', arr, imparams)
if not retval:
# Encoding has failed.
continue
img_str = img_str.tostring()
img = tf.Summary.Image()
img.height = h
img.width = w
# 1 - grayscale 3 - RGB 4 - RGBA
img.colorspace = c
img.encoded_image_string = img_str
s.value.add(tag=tag, image=img)
return s | 0.000858 |
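A minimal usage sketch of the summary helper above, assuming TensorFlow 1.x (`tf.Summary`), OpenCV, and `six` are importable as in the snippet; the image batch is hypothetical.

```python
import numpy as np

# Hypothetical batch of four 32x32 RGB images, NHWC layout, values in [0, 255].
batch = np.random.randint(0, 256, size=(4, 32, 32, 3), dtype=np.uint8)

summary = create_image_summary('samples', batch)
# One summary value per image, tagged 'samples/0' ... 'samples/3'
# (assuming every PNG encoding succeeds).
print([v.tag for v in summary.value])
```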
def session(df, start='17:00', end='16:00'):
""" remove previous globex day from df """
if df.empty:
return df
# get start/end/now as decimals
int_start = list(map(int, start.split(':')))
int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001
int_end = list(map(int, end.split(':')))
int_end = int_end[0] + int_end[1] / 100
int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100)
# same-day session?
is_same_day = int_end > int_start
# set pointers
curr = prev = df[-1:].index[0].strftime('%Y-%m-%d')
# globex/forex session
if not is_same_day:
prev = (datetime.strptime(curr, '%Y-%m-%d') -
timedelta(1)).strftime('%Y-%m-%d')
# slice
if int_now >= int_start:
df = df[df.index >= curr + ' ' + start]
else:
df = df[df.index >= prev + ' ' + start]
return df.copy() | 0.001107 |
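A minimal usage sketch of the session filter above, assuming pandas/NumPy plus the `datetime`/`timedelta` imports the snippet itself relies on; the price series is hypothetical.

```python
import numpy as np
import pandas as pd

# Two days of hypothetical hourly closes on a DatetimeIndex.
idx = pd.date_range('2024-01-02 00:00', periods=48, freq='h')
df = pd.DataFrame({'close': np.arange(48.0)}, index=idx)

# Keep only the current 17:00 -> 16:00 globex session.
current = session(df)
print(current.index.min())  # expected: 17:00 of the last calendar day in df
```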
def mmGetMetricStabilityConfusion(self):
"""
For each iteration that doesn't follow a reset, looks at every other
iteration for the same world that doesn't follow a reset, and computes the
number of bits that show up in one or the other set of active cells for
that iteration, but not both. This metric returns the distribution of those
numbers.
@return (Metric) Stability confusion metric
"""
self._mmComputeSequenceRepresentationData()
numbers = self._mmData["stabilityConfusion"]
return Metric(self, "stability confusion", numbers) | 0.001727 |
def monkey_patch():
"""
Monkey patches `zmq.Context` and `zmq.Socket`.
If test_suite is True, the pyzmq test suite will be patched for
compatibility as well.
"""
ozmq = __import__('zmq')
ozmq.Socket = zmq.Socket
ozmq.Context = zmq.Context
ozmq.Poller = zmq.Poller
ioloop = __import__('zmq.eventloop.ioloop')
ioloop.Poller = zmq.Poller | 0.002646 |
def fun_inverse(fun=None, y=0, x0=None, args=(), disp=False, method='Nelder-Mead', **kwargs):
r"""Find the threshold level that accomplishes the desired specificity
Call indicated function repeatedly to find answer to the inverse function evaluation
Arguments:
fun (function): function to be calculate an inverse for
y (float): desired output of fun
x0 (float): initial guess at input to fun, the fun arg that will be adjusted
args (list or tuple): constant arguments to fun which will not be adjusted
constraints (tuple): dictionary of optimizer constraints (see scipy.optimize.minimize)
disp (bool): whether to display incremental results during optimization
method (str): one of the scipy.optimize.minimize methods
additional kwargs are passed along to the minimize function
fun_inverse can be used to calculate a trivial square root:
>>> round(fun_inverse(fun=lambda x: x**2, y=9, x0=0), 6)
3.0
"""
fun_inverse.fun = cost_fun.fun = fun if fun is not None else getattr(fun_inverse, 'fun', lambda x: x)
fun_inverse.target = cost_fun.target = y or 0
fun_inverse.verbose = verbose = cost_fun.verbose = kwargs.pop(
'verbose', getattr(cost_fun, 'verbose', getattr(fun_inverse, 'verbose', False)))
fun_inverse.x0 = x0 = x0 if x0 is not None else getattr(fun_inverse, 'x0', 0) or 0
if verbose:
print(' x0: {}\ntarget: {}\n'.format(fun_inverse.x0, fun_inverse.target))
res = minimize(cost_fun,
x0=x0,
args=args,
options=kwargs.pop('options', {}),
method=method,
**kwargs
)
if isinstance(x0, NUMERIC_TYPES):
return res.x[0]
return res.x | 0.005612 |
def get_triples(self, subject=None, predicate=None, object_=None):
"""Returns triples that correspond to the specified subject,
predicates, and objects."""
for triple in self.triples:
# Filter out non-matches
if subject is not None and triple['subject'] != subject:
continue
if predicate is not None and triple['predicate'] != predicate:
continue
if object_ is not None and triple['object'] != object_:
continue
yield triple | 0.003578 |
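A minimal usage sketch, assuming the generator above is available as a plain function that can be attached to a tiny in-memory store; the triples are hypothetical.

```python
class Graph:
    """Tiny stand-in store exposing a `triples` list."""
    def __init__(self, triples):
        self.triples = triples

# Reuse the generator defined above as a method of the stand-in class.
Graph.get_triples = get_triples

g = Graph([
    {'subject': 'alice', 'predicate': 'knows', 'object': 'bob'},
    {'subject': 'alice', 'predicate': 'likes', 'object': 'tea'},
    {'subject': 'bob',   'predicate': 'knows', 'object': 'carol'},
])
print(list(g.get_triples(subject='alice', predicate='knows')))
# -> [{'subject': 'alice', 'predicate': 'knows', 'object': 'bob'}]
```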
def delete(self):
" Delete the address."
response = self.dyn.delete(self.delete_url)
return response.content['job_id'] | 0.014085 |
def splits_and_paths(self, data_dir):
"""List of pairs (split, paths) for the current epoch."""
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
def append_epoch(paths):
return [
"{}.{}".format(path, self.current_epoch)
for path in paths
]
# We set shuffled=True as we don't want to shuffle on disk later.
return [
(split["split"], append_epoch(filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=True
)))
for split in self.dataset_splits
] | 0.002882 |
def _convert_data(self, data_type, num_elems, num_values, indata):
'''
Converts "indata" into a byte stream
Parameters:
data_type : int
The CDF file data type
num_elems : int
The number of elements in the data
num_values : int
The number of values in each record
indata : (varies)
The data to be converted
Returns:
recs : int
The number of records generated by converting indata
odata : byte stream
The stream of bytes to write to the CDF file
'''
recSize = CDF._datatype_size(data_type, num_elems) * num_values
if (isinstance(indata, list) or isinstance(indata, tuple)):
size = len(indata)
if (data_type == CDF.CDF_CHAR or data_type == CDF.CDF_UCHAR):
odata = ''
for x in range(0, size):
adata = indata[x]
if (isinstance(adata, list) or isinstance(adata, tuple)):
size2 = len(adata)
for y in range(0, size2):
odata += adata[y].ljust(num_elems, '\x00')
else:
size2 = 1
odata += adata.ljust(num_elems, '\x00')
recs = int((size*size2)/num_values)
return recs, odata.encode()
else:
tofrom = self._convert_option()
dt_string = CDF._convert_type(data_type)
recs = int(size/num_values)
if (data_type == CDF.CDF_EPOCH16 and
isinstance(indata[0], complex)):
complex_data = []
for x in range(0, recs):
acomplex = indata[x]
complex_data.append(acomplex.real)
complex_data.append(acomplex.imag)
size = 2 * size
indata = complex_data
if (data_type == CDF.CDF_EPOCH16 and
not isinstance(indata[0], complex)):
recs = int(recs/2)
form = tofrom + str(size) + dt_string
return recs, struct.pack(form, *indata)
elif (isinstance(indata, bytes)):
tofrom = self._convert_option()
recs = int(len(indata) / recSize)
dt_string = CDF._convert_type(data_type)
size = recs * num_values * num_elems
if (data_type == CDF.CDF_EPOCH16):
size = size * 2
form = str(size) + dt_string
form2 = tofrom + form
datau = struct.unpack(form, indata)
return recs, struct.pack(form2, *datau)
elif (isinstance(indata, np.ndarray)):
tofrom = self._convert_option()
npdata = CDF._convert_nptype(data_type, indata)
recs = len(indata)
dt_string = CDF._convert_type(data_type)
if (data_type == CDF.CDF_EPOCH16):
num_elems = 2 * num_elems
form = str(recs*num_values*num_elems) + dt_string
form2 = tofrom + str(recs*num_values*num_elems) + dt_string
datau = struct.unpack(form, npdata)
return recs, struct.pack(form2, *datau)
elif (isinstance(indata, str)):
return 1, indata.ljust(num_elems, '\x00').encode()
else:
tofrom = self._convert_option()
dt_string = CDF._convert_type(data_type)
if (data_type == CDF.CDF_EPOCH16):
num_elems = 2 * num_elems
try:
recs = int(len(indata) / recSize)
except:
recs = 1
if (data_type == CDF.CDF_EPOCH16):
complex_data = []
if (recs > 1):
for x in range(0, recs):
acomplex = indata[x]
complex_data.append(acomplex.real)
complex_data.append(acomplex.imag)
else:
complex_data.append(indata.real)
complex_data.append(indata.imag)
indata = complex_data
form = tofrom + str(recs*num_values*num_elems) + dt_string
if (recs*num_values*num_elems > 1):
return recs, struct.pack(form, *indata)
else:
return recs, struct.pack(form, indata) | 0.000662 |
def offer(self, item, timeout=0):
"""
Inserts the specified element into this queue if it is possible to do so immediately without violating capacity
restrictions. Returns ``true`` upon success. If there is no space currently available:
* If a timeout is provided, it waits until this timeout elapses and returns the result.
* If a timeout is not provided, returns ``false`` immediately.
:param item: (object), the item to be added.
:param timeout: (long), maximum time in seconds to wait for addition (optional).
:return: (bool), ``true`` if the element was added to this queue, ``false`` otherwise.
"""
check_not_none(item, "Value can't be None")
element_data = self._to_data(item)
return self._encode_invoke(queue_offer_codec, value=element_data, timeout_millis=to_millis(timeout)) | 0.008999 |
def read_json_file(cls, path):
"""
Read an instance from a JSON-formatted file.
:return: A new instance
"""
with open(path, 'r') as f:
return cls.from_dict(json.load(f)) | 0.009009 |
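A minimal usage sketch, assuming the reader above is available as a plain function that can be attached as a classmethod of a small stand-in class providing `from_dict`; the settings file is a temporary, hypothetical one.

```python
import json
import os
import tempfile

class Settings:
    """Hypothetical stand-in class that wraps a plain dict."""
    def __init__(self, values):
        self.values = values

    @classmethod
    def from_dict(cls, d):
        return cls(d)

# Attach the reader defined above as a classmethod of the stand-in class.
Settings.read_json_file = classmethod(read_json_file)

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump({'debug': True, 'retries': 3}, tmp)

print(Settings.read_json_file(tmp.name).values)  # -> {'debug': True, 'retries': 3}
os.unlink(tmp.name)
```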
def _gather_reverses(self):
"""
Get all the related objects that point to this
object that we need to clone. Uses self.clone_related
to find those objects.
"""
old_reverses = {'m2m': {}, 'reverse': {}}
for reverse in self.clone_related:
ctype, name, l = self._gather_reverse(reverse)
old_reverses[ctype][reverse] = (name, l)
return old_reverses | 0.006912 |
def _apply_mask(
data: np.ndarray,
encoded_fill_values: list,
decoded_fill_value: Any,
dtype: Any,
) -> np.ndarray:
"""Mask all matching values in a NumPy arrays."""
data = np.asarray(data, dtype=dtype)
condition = False
for fv in encoded_fill_values:
condition |= data == fv
return np.where(condition, decoded_fill_value, data) | 0.002688 |
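A minimal usage sketch of the masking helper above; the encoded fill values here are hypothetical.

```python
import numpy as np

raw = np.array([1.0, -999.0, 3.0, -1.0])

# Replace both encoded fill values with NaN, keeping everything else.
clean = _apply_mask(raw, encoded_fill_values=[-999, -1],
                    decoded_fill_value=np.nan, dtype=np.float64)
print(clean)  # -> [ 1. nan  3. nan]
```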
def _send_packet(self, data):
" Send to server. "
data = json.dumps(data).encode('utf-8')
# Be sure that our socket is blocking, otherwise, the send() call could
# raise `BlockingIOError` if the buffer is full.
self.socket.setblocking(1)
self.socket.send(data + b'\0') | 0.006289 |
def angle(v1,v2, cos=False):
"""
Find the angle between two vectors.
:param cos: If True, the cosine of the
angle will be returned. False by default.
"""
n = (norm(v1)*norm(v2))
_ = dot(v1,v2)/n
return _ if cos else N.arccos(_) | 0.011538 |
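A minimal usage sketch, assuming `norm`, `dot`, and `N` in the snippet refer to the usual NumPy helpers (`numpy.linalg.norm`, `numpy.dot`, and `numpy` itself).

```python
import numpy as N
from numpy import dot
from numpy.linalg import norm

print(angle([1, 0], [0, 1]))             # -> 1.5707963... (pi/2 radians)
print(angle([1, 0], [1, 1], cos=True))   # -> 0.7071067... (cosine of 45 degrees)
```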
def fit(self, X, y, cost_mat):
""" Build a example-dependent cost-sensitive logistic regression from the training set (X, y, cost_mat)
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
self : object
Returns self.
"""
#TODO: Check input
n_features = X.shape[1]
if self.fit_intercept:
w0 = np.zeros(n_features + 1)
else:
w0 = np.zeros(n_features)
if self.solver == 'ga':
#TODO: add n_jobs
res = GeneticAlgorithmOptimizer(_logistic_cost_loss,
w0.shape[0],
iters=self.max_iter,
type_='cont',
n_chromosomes=100,
per_mutations=0.25,
n_elite=10,
fargs=(X, y, cost_mat, 1. / self.C),
range_=(-5, 5),
n_jobs=1,
verbose=self.verbose)
res.fit()
elif self.solver == 'bfgs':
if self.verbose > 0:
disp = True
else:
disp = False
res = minimize(_logistic_cost_loss,
w0,
method='BFGS',
args=(X, y, cost_mat, 1. / self.C),
tol=self.tol,
options={'maxiter': self.max_iter, 'disp': disp})
if self.fit_intercept:
self.coef_ = res.x[:-1]
self.intercept_ = res.x[-1]
else:
self.coef_ = res.x | 0.003085 |
def gcal2jd(year, month, day):
"""Gregorian calendar date to Julian date.
The input and output are for the proleptic Gregorian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd1, jd2: 2-element tuple of floats
When added together, the numbers give the Julian date for the
given Gregorian calendar date. The first number is always
MJD_0, i.e., 2400000.5, so the second is the MJD.
Examples
--------
>>> gcal2jd(2000,1,1)
(2400000.5, 51544.0)
>>> 2400000.5 + 51544.0 + 0.5
2451545.0
>>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678.0, 2000,
....: 2012, 2245]
>>> month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
>>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31]
>>> x = [gcal2jd(y, m, d) for y, m, d in zip(year, month, day)]
>>> for i in x: print i
(2400000.5, -2395215.0)
(2400000.5, -1451021.0)
(2400000.5, -1062364.0)
(2400000.5, -723762.0)
(2400000.5, -679162.0)
(2400000.5, -678774.0)
(2400000.5, -678368.0)
(2400000.5, -633797.0)
(2400000.5, -65812.0)
(2400000.5, 51827.0)
(2400000.5, 56242.0)
(2400000.5, 141393.0)
Negative months and days are valid. For example, 2000/-2/-4 =>
1999/+12-2/-4 => 1999/10/-4 => 1999/9/30-4 => 1999/9/26.
>>> gcal2jd(2000, -2, -4)
(2400000.5, 51447.0)
>>> gcal2jd(1999, 9, 26)
(2400000.5, 51447.0)
>>> gcal2jd(2000, 2, -1)
(2400000.5, 51573.0)
>>> gcal2jd(2000, 1, 30)
(2400000.5, 51573.0)
>>> gcal2jd(2000, 3, -1)
(2400000.5, 51602.0)
>>> gcal2jd(2000, 2, 28)
(2400000.5, 51602.0)
Month 0 becomes previous month.
>>> gcal2jd(2000, 0, 1)
(2400000.5, 51513.0)
>>> gcal2jd(1999, 12, 1)
(2400000.5, 51513.0)
Day number 0 becomes last day of previous month.
>>> gcal2jd(2000, 3, 0)
(2400000.5, 51603.0)
>>> gcal2jd(2000, 2, 29)
(2400000.5, 51603.0)
If `day` is greater than the number of days in `month`, then it
gets carried over to the next month.
>>> gcal2jd(2000,2,30)
(2400000.5, 51604.0)
>>> gcal2jd(2000,3,1)
(2400000.5, 51604.0)
>>> gcal2jd(2001,2,30)
(2400000.5, 51970.0)
>>> gcal2jd(2001,3,2)
(2400000.5, 51970.0)
Notes
-----
The returned Julian date is for mid-night of the given date. To
find the Julian date for any time of the day, simply add time as a
fraction of a day. For example Julian date for mid-day can be
obtained by adding 0.5 to either the first part or the second
part. The latter is preferable, since it will give the MJD for the
date and time.
BC dates should be given as -(BC - 1) where BC is the year. For
example 1 BC == 0, 2 BC == -1, and so on.
Negative numbers can be used for `month` and `day`. For example
2000, -1, 1 is the same as 1999, 11, 1.
The Julian dates are proleptic Julian dates, i.e., values are
returned without considering if Gregorian dates are valid for the
given date.
The input values are truncated to integers.
"""
year = int(year)
month = int(month)
day = int(day)
a = ipart((month - 14) / 12.0)
jd = ipart((1461 * (year + 4800 + a)) / 4.0)
jd += ipart((367 * (month - 2 - 12 * a)) / 12.0)
x = ipart((year + 4900 + a) / 100.0)
jd -= ipart((3 * x) / 4.0)
jd += day - 2432075.5 # was 32075; add 2400000.5
jd -= 0.5 # 0 hours; above JD is for midday, switch to midnight.
return MJD_0, jd | 0.000269 |
async def reseed(self, seed: str = None) -> None:
"""
Rotate key for VON anchor: generate new key, submit to ledger, update wallet.
Raise WalletState if wallet is currently closed.
:param seed: new seed for ed25519 key pair (default random)
"""
LOGGER.debug('BaseAnchor.reseed >>> seed: [SEED]')
verkey = await self.wallet.reseed_init(seed)
req_json = await ledger.build_nym_request(
self.did,
self.did,
verkey,
self.name,
(await self.get_nym_role()).token())
await self._sign_submit(req_json)
await self.wallet.reseed_apply()
LOGGER.debug('BaseAnchor.reseed <<<') | 0.00419 |
def _assert_response_success(self, response):
"""
:type response: requests.Response
:rtype: None
:raise ApiException: When the response is not successful.
"""
if response.status_code != self._STATUS_CODE_OK:
raise ExceptionFactory.create_exception_for_response(
response.status_code,
self._fetch_all_error_message(response),
self._fetch_response_id(response)
) | 0.004149 |
def get_gene_ids(fusion_bed):
"""
Parses FusionInspector bed file to ascertain the ENSEMBL gene ids
:param str fusion_bed: path to fusion annotation
:return: dict
"""
with open(fusion_bed, 'r') as f:
gene_to_id = {}
regex = re.compile(r'(?P<gene>ENSG\d*)')
for line in f:
line = line.split('\t')
transcript, gene_bit, name = line[3].split(';')
m = regex.search(gene_bit)
if m:
gene_to_id[name] = m.group('gene')
return gene_to_id | 0.001832 |
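A minimal usage sketch with a single, hypothetical FusionInspector-style bed line written to a temporary file; column 4 carries `transcript;gene;name`, and the snippet's own module is assumed to import `re`.

```python
import os
import tempfile

row = '\t'.join(['chr17', '100', '200',
                 'ENST00000357654.1;ENSG00000012048.23;BRCA1', '0', '+']) + '\n'
with tempfile.NamedTemporaryFile('w', suffix='.bed', delete=False) as tmp:
    tmp.write(row)

print(get_gene_ids(tmp.name))  # -> {'BRCA1': 'ENSG00000012048'}
os.unlink(tmp.name)
```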
def locate(self, minimum_version=None, maximum_version=None, jdk=False):
"""Finds a java distribution that meets the given constraints and returns it.
First looks for a cached version that was previously located, otherwise calls locate().
:param minimum_version: minimum jvm version to look for (eg, 1.7).
The stricter of this and `--jvm-distributions-minimum-version` is used.
:param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
The stricter of this and `--jvm-distributions-maximum-version` is used.
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution.
:rtype: :class:`Distribution`
:raises: :class:`Distribution.Error` if no suitable java distribution could be found.
"""
def _get_stricter_version(a, b, name, stricter):
version_a = _parse_java_version(name, a)
version_b = _parse_java_version(name, b)
if version_a is None:
return version_b
if version_b is None:
return version_a
return stricter(version_a, version_b)
# Take the tighter constraint of method args and subsystem options.
minimum_version = _get_stricter_version(minimum_version,
self._minimum_version,
"minimum_version",
max)
maximum_version = _get_stricter_version(maximum_version,
self._maximum_version,
"maximum_version",
min)
key = (minimum_version, maximum_version, jdk)
dist = self._cache.get(key)
if not dist:
dist = self._scan_constraint_match(minimum_version, maximum_version, jdk)
if not dist:
dist = self._locate(minimum_version=minimum_version,
maximum_version=maximum_version,
jdk=jdk)
self._cache[key] = dist
return dist | 0.007156 |
def verify_row(self, row):
"""Checks parameter at index *row* for invalidating conditions
:returns: str -- message if error, 0 otherwise
"""
param = self._parameters[row]
if param['parameter'] == '':
return "Auto-parameter type undefined"
if len(param['selection']) == 0:
return "At least one component must be selected for each auto-parameter"
if param['parameter'] not in self._selectionParameters(param):
return 'Parameter {} not present in all selected components'.format(param['parameter'])
if param['parameter'] == 'filename':
if len(param['names']) < 1:
return "No filenames in file auto-parameter list"
else:
if param['step'] == 0 and param['start'] != param['stop']:
return "Auto-parameter step size of 0 not allowed"
if abs(param['stop'] - param['start']) < param['step']:
return "Auto-parameter step size larger than range"
if not self.checkLimits(row, param['start']):
return "Auto-parameter start value invalid"
if not self.checkLimits(row, param['stop']):
return "Auto-parameter stop value invalid"
return 0 | 0.00313 |
def __uncache(self, file):
"""
Uncaches given file.
:param file: File to uncache.
:type file: unicode
"""
if file in self.__files_cache:
self.__files_cache.remove_content(file) | 0.008403 |
def create_key_for_data(prefix, data, key_params):
"""
Build the key corresponding to the ``data`` params of a task, using the ``key_params`` defined in the decorator
"""
d = data.get_data()
values = []
for k in key_params:
if k in d and type(d[k]) is list:
values.append("{0}:{1}".format(k, " -".join(d[k])))
else:
value = d[k] if k in d else ''
values.append("{0}:{1}".format(k, value))
return "{0}-{1}".format(prefix, "-".join(values)) | 0.003906 |
def A2(self):
"""
If the "qop" directive's value is "auth" or is unspecified, then A2 is:
A2 = Method ":" digest-uri-value
Else,
A2 = Method ":" digest-uri-value ":" H(entity-body)
"""
if self.params['qop'] == 'auth' or not self.params['qop']:
return self.request.method + ":" + self.request.uri
elif self.params['qop'] == 'auth-int':
#print "UNSUPPORTED 'qop' METHOD\n"
return ":".join([self.request.method,
self.request.uri,
self.apply_checksum(self.request.body)])
else:
print "A2 GOT BAD VALUE FOR 'qop': %s\n" % self.params['qop'] | 0.004155 |
def search_with_retry(self, sleeptime=3.0, retrycount=3, **params):
"""
This function performs a search given a dictionary of search(..)
parameters. It accounts for server timeouts as necessary and
will retry some number of times.
@param sleeptime: number of seconds to sleep between retries
@type sleeptime: float
@param retrycount: number of times to retry given search
@type retrycount: int
@param params: search parameters
@type params: **kwds
@rtype: list
@return: returns records in given format
"""
results = []
count = 0
while count < retrycount:
try:
results = self.search(**params)
break
except urllib2.URLError:
sys.stderr.write("Timeout while searching...Retrying\n")
time.sleep(sleeptime)
count += 1
else:
sys.stderr.write("Aborting search after %d attempts.\n" % (retrycount,))
return results | 0.002804 |
def relStdDev(self, limit=None):
"""return the relative standard deviation optionally limited to the last limit values"""
moments = self.meanAndStdDev(limit)
if moments is None:
return None
return moments[1] / moments[0] | 0.011364 |
def prepare_working_directory(job, submission_path, validator_path):
'''
Based on two downloaded files in the working directory,
the student submission and the validation package,
the working directory is prepared.
We unpack the student submission first, so that teacher files can overwrite
them if needed.
When the student submission is a single directory, we change the
working directory and go directly into it, before dealing with the
validator stuff.
If unrecoverable errors happen, such as an empty student archive,
a JobException is raised.
'''
# Safeguard for fail-fast in disk full scenarios on the executor
dusage = shutil.disk_usage(job.working_dir)
if dusage.free < 1024 * 1024 * 50: # 50 MB
info_student = "Internal error with the validator. Please contact your course responsible."
info_tutor = "Error: Execution cancelled, less then 50MB of disk space free on the executor."
logger.error(info_tutor)
raise JobException(info_student=info_student, info_tutor=info_tutor)
submission_fname = os.path.basename(submission_path)
validator_fname = os.path.basename(validator_path)
# Un-archive student submission
single_dir, did_unpack = unpack_if_needed(job.working_dir, submission_path)
job.student_files = os.listdir(job.working_dir)
if did_unpack:
job.student_files.remove(submission_fname)
# Fail automatically on empty student submissions
if len(job.student_files) == 0:
info_student = "Your compressed upload is empty - no files in there."
info_tutor = "Submission archive file has no content."
logger.error(info_tutor)
raise JobException(info_student=info_student, info_tutor=info_tutor)
# Handle student archives containing a single directory with all data
if single_dir:
logger.warning(
"The submission archive contains only one directory. Changing working directory.")
# Set new working directory
job.working_dir = job.working_dir + single_dir + os.sep
# Move validator package there
shutil.move(validator_path, job.working_dir)
validator_path = job.working_dir + validator_fname
# Re-scan for list of student files
job.student_files = os.listdir(job.working_dir)
# The working directory now only contains the student data and the downloaded
# validator package.
# Update the file list accordingly.
job.student_files.remove(validator_fname)
logger.debug("Student files: {0}".format(job.student_files))
# Unpack validator package
single_dir, did_unpack = unpack_if_needed(job.working_dir, validator_path)
if single_dir:
info_student = "Internal error with the validator. Please contact your course responsible."
info_tutor = "Error: Directories are not allowed in the validator archive."
logger.error(info_tutor)
raise JobException(info_student=info_student, info_tutor=info_tutor)
if not os.path.exists(job.validator_script_name):
if did_unpack:
# The download was an archive, but the validator was not inside.
# This is a failure of the tutor.
info_student = "Internal error with the validator. Please contact your course responsible."
info_tutor = "Error: Missing validator.py in the validator archive."
logger.error(info_tutor)
raise JobException(info_student=info_student,
info_tutor=info_tutor)
else:
# The download is already the script, but has the wrong name
logger.warning("Renaming {0} to {1}.".format(
validator_path, job.validator_script_name))
shutil.move(validator_path, job.validator_script_name) | 0.002357 |
def _build_module_db(self):
"""
Build database of module callables sorted by line number.
The database is a dictionary whose keys are module file names and
whose values are lists of dictionaries containing name and line
number of callables in that module
"""
tdict = collections.defaultdict(lambda: [])
for callable_name, callable_dict in self._exh_obj.callables_db.items():
fname, line_no = callable_dict["code_id"]
cname = (
"{cls_name}.__init__".format(cls_name=callable_name)
if callable_dict["type"] == "class"
else callable_name
)
tdict[fname].append({"name": cname, "line": line_no})
for fname in tdict.keys():
self._module_obj_db[fname] = sorted(
tdict[fname], key=lambda idict: idict["line"]
) | 0.002195 |
def write_csv(filename, header, data=None, rows=None, mode="w"):
"""Write the data to the specified filename
Usage
-----
>>> write_csv(filename, header, data, mode=mode)
Parameters
----------
filename : str
The name of the file
header : list of strings
The names of the columns (or fields):
(fieldname1, fieldname2, ...)
data : list of dictionaries (optional)
[
{fieldname1: a1, fieldname2: a2},
{fieldname1: b1, fieldname2: b2},
...
]
rows : list of lists (optional)
[
(a1, a2),
(b1, b2),
...
]
mode : str (optional)
"w": write the data to the file by overwriting it
"a": write the data to the file by appending them
Returns
-------
None. A CSV file is written.
"""
if data == rows == None:
msg = "You must specify either data or rows"
raise ValueError(msg)
elif data != None and rows != None:
msg = "You must specify either data or rows. Not both"
raise ValueError(msg)
data_header = dict((x, x) for x in header)
with open(filename, mode) as f:
if data:
writer = csv.DictWriter(f, fieldnames=header)
if mode == "w":
writer.writerow(data_header)
writer.writerows(data)
elif rows:
writer = csv.writer(f)
if mode == "w":
writer.writerow(header)
writer.writerows(rows)
print "Saved %s." % filename | 0.003193 |
def copy(self):
"""
Returns a duplicate of this instance.
:return <Query>
"""
options = {
'op': self.__op,
'caseSensitive': self.__caseSensitive,
'value': copy.copy(self.__value),
'inverted': self.__inverted,
'functions': copy.copy(self.__functions),
'math': copy.copy(self.__math)
}
return orb.Query(self.__model, self.__column, **options) | 0.004228 |
def diff(self, cursor='null', **kwargs):
"""文件增量更新操作查询接口.
本接口有数秒延迟,但保证返回结果为最终一致.
:param cursor: 用于标记更新断点。
* 首次调用cursor=null;
* 非首次调用,使用最后一次调用diff接口的返回结果
中的cursor。
:type cursor: str
:return: Response 对象
"""
params = {
'cursor': cursor,
}
return self._request('file', 'diff', extra_params=params, **kwargs) | 0.004329 |
def xml_parser(self, scode, *args):
"""
args[0]: xpath
args[1]: text / html / xml
"""
allow_method = ('text', 'html', 'xml')
xpath_string, method = args
assert method in allow_method, 'method allow: %s' % allow_method
result = self.ensure_list(
self._fromstring(scode,
parser=self._xml_parser).xpath(xpath_string))
result = [
self._tostring(
item, method=method, with_tail=0, encoding='unicode')
for item in result
]
return result | 0.003333 |
def short_help(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
help - help text
}
"""
o=i.get('out','')
r=version({})
if r['return']>0: return r
h='CK version: '+r['version_str']+'\n'
r=python_version({})
if r['return']>0: return r
h+='\nPython version used by CK: '+r['version']+'\n'
h+='\nAll internal CK commands: ck help\n'
h+='\n'+cfg['help_web'].replace('\n','').strip().replace(' ','')
if o=='con':
out(h)
return {'return':0, 'help':h} | 0.027586 |
def _set_next_vrf_list(self, v, load=False):
"""
Setter method for next_vrf_list, mapped from YANG variable /rbridge_id/route_map/content/set/ipv6/next_vrf/next_vrf_list (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_vrf_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_vrf_list() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_vrf_list must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""",
})
self.__next_vrf_list = t
if hasattr(self, '_set'):
self._set() | 0.00463 |
def _read_signer(key_filename):
"""Reads the given file as a hex key.
Args:
key_filename: The filename where the key is stored. If None,
defaults to the default key for the current user.
Returns:
Signer: the signer
Raises:
CliException: If unable to read the file.
"""
filename = key_filename
if filename is None:
filename = os.path.join(os.path.expanduser('~'),
'.sawtooth',
'keys',
getpass.getuser() + '.priv')
try:
with open(filename, 'r') as key_file:
signing_key = key_file.read().strip()
except IOError as e:
raise CliException('Unable to read key file: {}'.format(str(e)))
try:
private_key = Secp256k1PrivateKey.from_hex(signing_key)
except ParseError as e:
raise CliException('Unable to read key in file: {}'.format(str(e)))
context = create_context('secp256k1')
crypto_factory = CryptoFactory(context)
return crypto_factory.new_signer(private_key) | 0.000907 |
def parse(self, filename, verbose=0):
"""
Parse the given file. Return :class:`EventReport`.
"""
run_completed, start_datetime, end_datetime = False, None, None
filename = os.path.abspath(filename)
report = EventReport(filename)
w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")
import warnings
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
#print("got doc.tag", doc.tag,"--")
try:
#print(doc.text)
event = yaml.load(doc.text) # Can't use ruamel safe_load!
#yaml.load(doc.text, Loader=ruamel.yaml.Loader)
#print(event.yaml_tag, type(event))
except:
#raise
# Wrong YAML doc. Check the doc tag and instantiate the proper event.
message = "Malformatted YAML document at line: %d\n" % doc.lineno
message += doc.text
# This call is very expensive when we have many exceptions due to malformatted YAML docs.
if verbose:
message += "Traceback:\n %s" % straceback()
if "error" in doc.tag.lower():
print("It seems an error. doc.tag:", doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
# Check whether the calculation completed.
if doc.tag == "!FinalSummary":
#print(doc)
run_completed = True
d = doc.as_dict()
#print(d)
start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]
report.set_run_completed(run_completed, start_datetime, end_datetime)
return report | 0.007792 |
def _parse_hz(self,hz,Hz,dHzdz):
"""
NAME:
_parse_hz
PURPOSE:
Parse the various input options for the hz functions
HISTORY:
2016-12-27 - Written - Bovy (UofT/CCA)
"""
if isinstance(hz,dict):
hz= [hz]
try:
nhz= len(hz)
except TypeError:
hz= [hz]
Hz= [Hz]
dHzdz= [dHzdz]
nhz= 1
if nhz != self._nsigma and nhz != 1:
raise ValueError('Number of hz functions needs to be equal to the number of Sigma functions or to 1')
if nhz == 1 and self._nsigma > 1:
hz= [hz[0] for ii in range(self._nsigma)]
if not isinstance(hz[0],dict):
Hz= [Hz[0] for ii in range(self._nsigma)]
dHzdz= [dHzdz[0] for ii in range(self._nsigma)]
self._Hz= Hz
self._hz= hz
self._dHzdz= dHzdz
self._nhz= len(self._hz)
if isinstance(hz[0],dict):
self._hz_dict= copy.copy(hz)
self._parse_hz_dict()
else:
self._hz_dict= None
return None | 0.021758 |
def env(section, map_files, phusion, phusion_path, quiet, edit, create):
"""
Reads the file defined by the S3CONF variable and outputs its contents to stdout. Logs are printed to stderr.
See options for added functionality: editing file, mapping files, dumping in the phusion-baseimage format, etc.
"""
try:
logger.debug('Running env command')
settings = config.Settings(section=section)
storage = STORAGES['s3'](settings=settings)
conf = s3conf.S3Conf(storage=storage, settings=settings)
if edit:
conf.edit(create=create)
else:
env_vars = conf.get_envfile().as_dict()
if env_vars.get('S3CONF_MAP') and map_files:
conf.download_mapping(env_vars.get('S3CONF_MAP'))
if not quiet:
for var_name, var_value in sorted(env_vars.items(), key=lambda x: x[0]):
click.echo('{}={}'.format(var_name, var_value))
if phusion:
s3conf.phusion_dump(env_vars, phusion_path)
except exceptions.EnvfilePathNotDefinedError:
raise exceptions.EnvfilePathNotDefinedUsageError()
except exceptions.FileDoesNotExist as e:
raise UsageError('The file {} does not exist. Try "-c" option if you want to create it.'.format(str(e))) | 0.003797 |
def logout(self):
"""
Logout from steam. Does nothing if not logged on.
.. note::
The server will drop the connection immediately upon logout.
"""
if self.logged_on:
self.logged_on = False
self.send(MsgProto(EMsg.ClientLogOff))
try:
self.wait_event(self.EVENT_DISCONNECTED, timeout=5, raises=True)
except:
self.disconnect()
self.idle() | 0.008264 |
def fatal(self, i: int=None) -> str:
"""
Returns a fatal error message
"""
head = "[" + colors.red("\033[1mfatal error") + "]"
if i is not None:
head = str(i) + " " + head
return head | 0.016461 |
def _init_design_chooser(self):
"""
Initializes the choice of X and Y based on the selected initial design and the number of initial points.
"""
# If objective function was not provided, we require some initial sample data
if self.f is None and (self.X is None or self.Y is None):
raise InvalidConfigError("Initial data for both X and Y is required when objective function is not provided")
# Case 1:
if self.X is None:
self.X = initial_design(self.initial_design_type, self.space, self.initial_design_numdata)
self.Y, _ = self.objective.evaluate(self.X)
# Case 2
elif self.X is not None and self.Y is None:
self.Y, _ = self.objective.evaluate(self.X) | 0.007792 |
def copy(self):
"""
Returns a copy of this Markov Model.
Returns
-------
MarkovModel: Copy of this Markov model.
Examples
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.models import MarkovModel
>>> G = MarkovModel()
>>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
>>> G.add_edge(('a', 'b'), ('b', 'c'))
>>> G_copy = G.copy()
>>> G_copy.edges()
[(('a', 'b'), ('b', 'c'))]
>>> G_copy.nodes()
[('a', 'b'), ('b', 'c')]
>>> factor = DiscreteFactor([('a', 'b')], cardinality=[3],
... values=np.random.rand(3))
>>> G.add_factors(factor)
>>> G.get_factors()
[<DiscreteFactor representing phi(('a', 'b'):3) at 0x...>]
>>> G_copy.get_factors()
[]
"""
clone_graph = MarkovModel(self.edges())
clone_graph.add_nodes_from(self.nodes())
if self.factors:
factors_copy = [factor.copy() for factor in self.factors]
clone_graph.add_factors(*factors_copy)
return clone_graph | 0.001724 |
def create_build_from_buildrequest(self, build_request):
"""
render provided build_request and submit build from it
:param build_request: instance of build.build_request.BuildRequest
:return: instance of build.build_response.BuildResponse
"""
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())
build = build_request.render()
response = self.os.create_build(json.dumps(build))
build_response = BuildResponse(response.json(), self)
return build_response | 0.005236 |
def thermal_fit_result(fit_result, v_residual=None,
v_label='Unit-cell volume $(\mathrm{\AA}^3)$',
temp_fitline=np.asarray(
[300., 1000., 1500., 2000., 2500., 3000.]),
figsize=(5, 5), height_ratios=(3, 1), ms_data=50,
p_err=None, v_err=None, cbar_loc=(0.99, 0.1, .01, 0.82),
pdf_filen=None, title='Fit result'):
"""
plot P-V-T EOS curve fitting result
:param fit_result: lmfit result object, see example jnb file for detail
:param v_label: label for volume axis
:param temp_fitline: temperatures to calculate isothermal compression
curves, default = [300., 1000., 1500., 2000., 2500., 3000.]
:param figsize: figure size, default = (5, 5)
:param height_ratios: height ratio between the main and residual plots,
default = (3,1)
:param ms_data: marker size for data points
:param p_err: pressure error bar
:param v_err: volume error bar
:param cbar_loc: location of color bar
:param pdf_filen: name of pdf output file
:param title: title of the figure
:return: None
"""
# basic figure setup
f, ax = plt.subplots(2, 1, sharex=True, figsize=figsize,
gridspec_kw={'height_ratios': height_ratios})
for ax_i in ax:
ax_i.tick_params(direction='in')
# read data to plot
v_data = fit_result.userkws['v']
temp_data = fit_result.userkws['temp']
p_data = fit_result.data
p_datafit = fit_result.best_fit
v0 = uct.ufloat(fit_result.params['st_v0'].value,
fit_result.params['st_v0'].stderr)
sm = plt.cm.ScalarMappable(cmap=c_map,
norm=plt.Normalize(
vmin=300., vmax=temp_data.max()))
a = sm.to_rgba(temp_fitline)
v_fitline = np.linspace(v0.n, min(v_data), 1000)
fitmodel_copy = copy.deepcopy(fit_result)
for a_i, temp_i in zip(a, temp_fitline):
p_fitline = fitmodel_copy.eval(v=v_fitline,
temp=np.ones_like(v_fitline) * temp_i)
ax[0].plot(p_fitline, v_fitline, c=a_i)
# error range here does not make a lot sense, so not supported
# if (p_err is not None) and (v_err is not None):
ax[0].errorbar(p_data, v_data, xerr=p_err, yerr=v_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
points = ax[0].scatter(p_data, v_data, marker='o', s=ms_data, c=temp_data,
cmap=c_map, vmin=300., vmax=temp_data.max(),
zorder=1)
if v_residual is None:
ax[1].scatter(p_data, p_data - p_datafit, marker='o', s=ms_data,
c=temp_data, cmap=c_map, vmin=300.,
vmax=temp_data.max(), zorder=1)
ax[1].errorbar(p_data, p_data - p_datafit, yerr=p_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
ax[1].set_ylabel('$P_{obs} - P_{fit}$')
else:
ax[1].scatter(p_data, v_residual, marker='o', s=ms_data, c=temp_data,
cmap=c_map, vmin=300., vmax=temp_data.max(), zorder=1)
ax[1].errorbar(p_data, v_residual, yerr=p_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
ax[1].set_ylabel('$V_{obs} - V_{fit}$')
# ax[0].legend()
position = f.add_axes(cbar_loc)
f.colorbar(points, orientation="vertical", cax=position,
ticks=temp_fitline)
ax[1].axhline(0, c='k', ls='--')
ax[1].set_xlabel('Pressure (GPa)')
ax[0].set_ylabel(v_label)
ax[0].set_title(title)
plt.tight_layout()
if pdf_filen is not None:
f.savefig(pdf_filen) | 0.000792 |
def fcoe_fcoe_fabric_map_fcoe_fip_advertisement_fcoe_fip_advertisement_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
fcoe_fabric_map = ET.SubElement(fcoe, "fcoe-fabric-map")
fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-name")
fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name')
fcoe_fip_advertisement = ET.SubElement(fcoe_fabric_map, "fcoe-fip-advertisement")
fcoe_fip_advertisement_interval = ET.SubElement(fcoe_fip_advertisement, "fcoe-fip-advertisement-interval")
fcoe_fip_advertisement_interval.text = kwargs.pop('fcoe_fip_advertisement_interval')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.00905 |
def mode(self):
"""Computes the mode of a log-normal distribution built with the stats data."""
mu = self.mean()
sigma = self.std()
ret_val = math.exp(mu - sigma**2)
if math.isnan(ret_val):
ret_val = float("inf")
return ret_val | 0.010453 |
def setFormula(self, Formula=None):
"""Set the Dependent Services from the text of the calculation Formula
"""
bsc = getToolByName(self, 'bika_setup_catalog')
if Formula is None:
self.setDependentServices(None)
self.getField('Formula').set(self, Formula)
else:
keywords = re.compile(r"\[([^.^\]]+)\]").findall(Formula)
brains = bsc(portal_type='AnalysisService',
getKeyword=keywords)
services = [brain.getObject() for brain in brains]
self.getField('DependentServices').set(self, services)
self.getField('Formula').set(self, Formula) | 0.002933 |
def check_perf():
"Suggest how to improve the setup to speed things up"
from PIL import features, Image
from packaging import version
print("Running performance checks.")
# libjpeg_turbo check
print("\n*** libjpeg-turbo status")
if version.parse(Image.PILLOW_VERSION) >= version.parse("5.3.9"):
if features.check_feature('libjpeg_turbo'):
print("✔ libjpeg-turbo is on")
else:
print("✘ libjpeg-turbo is not on. It's recommended you install libjpeg-turbo to speed up JPEG decoding. See https://docs.fast.ai/performance.html#libjpeg-turbo")
else:
print(f"❓ libjpeg-turbo's status can't be derived - need Pillow(-SIMD)? >= 5.4.0 to tell, current version {Image.PILLOW_VERSION}")
# XXX: remove this check/note once Pillow and Pillow-SIMD 5.4.0 is available
pillow_ver_5_4_is_avail = pypi_module_version_is_available("Pillow", "5.4.0")
if pillow_ver_5_4_is_avail == False:
print("5.4.0 is not yet available, other than the dev version on github, which can be installed via pip from git+https://github.com/python-pillow/Pillow. See https://docs.fast.ai/performance.html#libjpeg-turbo")
# Pillow-SIMD check
print("\n*** Pillow-SIMD status")
if re.search(r'\.post\d+', Image.PILLOW_VERSION):
print(f"✔ Running Pillow-SIMD {Image.PILLOW_VERSION}")
else:
print(f"✘ Running Pillow {Image.PILLOW_VERSION}; It's recommended you install Pillow-SIMD to speed up image resizing and other operations. See https://docs.fast.ai/performance.html#pillow-simd")
# CUDA version check
# compatibility table: k: min nvidia ver is required for v: cuda ver
# note: windows nvidia driver version is slightly higher, see:
# https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
# note: add new entries if pytorch starts supporting new cudaXX
nvidia2cuda = {
"410.00": "10.0",
"384.81": "9.0",
"367.48": "8.0",
}
print("\n*** CUDA status")
if torch.cuda.is_available():
pynvml = load_pynvml_env()
nvidia_ver = (pynvml.nvmlSystemGetDriverVersion().decode('utf-8') if platform.system() != "Darwin" else "Cannot be determined on OSX yet")
cuda_ver = torch.version.cuda
max_cuda = "8.0"
for k in sorted(nvidia2cuda.keys()):
if version.parse(nvidia_ver) > version.parse(k): max_cuda = nvidia2cuda[k]
if version.parse(str(max_cuda)) <= version.parse(cuda_ver):
print(f"✔ Running the latest CUDA {cuda_ver} with NVIDIA driver {nvidia_ver}")
else:
print(f"✘ You are running pytorch built against cuda {cuda_ver}, your NVIDIA driver {nvidia_ver} supports cuda10. See https://pytorch.org/get-started/locally/ to install pytorch built against the faster CUDA version.")
else:
print(f"❓ Running cpu-only torch version, CUDA check is not relevant")
print("\nRefer to https://docs.fast.ai/performance.html to make sense out of these checks and suggestions.") | 0.004915 |
def iter_terms(fs, conj=False):
"""Iterate through all min/max terms in an N-dimensional Boolean space.
The *fs* argument is a sequence of :math:`N` Boolean functions.
If *conj* is ``False``, yield minterms.
Otherwise, yield maxterms.
"""
for num in range(1 << len(fs)):
yield num2term(num, fs, conj) | 0.002994 |
def delete_site(self, webspace_name, website_name,
delete_empty_server_farm=False, delete_metrics=False):
'''
Delete a website.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
delete_empty_server_farm:
If the site being deleted is the last web site in a server farm,
you can delete the server farm by setting this to True.
delete_metrics:
To also delete the metrics for the site that you are deleting, you
can set this to True.
'''
path = self._get_sites_details_path(webspace_name, website_name)
query = ''
if delete_empty_server_farm:
query += '&deleteEmptyServerFarm=true'
if delete_metrics:
query += '&deleteMetrics=true'
if query:
path = path + '?' + query.lstrip('&')
return self._perform_delete(path) | 0.003086 |
def find_spelling(n):
"""
Finds r, d s.t. n-1 = 2^r * d, with d odd; returned as (r, d)
"""
r = 0
d = n - 1
# divmod used for large numbers
quotient, remainder = divmod(d, 2)
# while we can still divide 2's into n-1...
while remainder != 1:
r += 1
d = quotient # previous quotient before we overwrite it
quotient, remainder = divmod(d, 2)
return r, d | 0.002525 |
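A quick sketch checking the decomposition n - 1 = 2^r * d that the Miller-Rabin-style helper above returns.

```python
n = 561
r, d = find_spelling(n)
print(r, d)                 # -> 4 35
print(2 ** r * d == n - 1)  # -> True
print(d % 2 == 1)           # -> True: d is odd
```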
def clear_old_backups(self, block_id):
"""
If we limit the number of backups we make, then clean out old ones
older than block_id - backup_max_age (given in the constructor).
This method does nothing otherwise.
Return None on success
Raise exception on error
"""
assert self.setup, "Not set up yet. Call .db_setup() first!"
if self.backup_max_age is None:
# never delete backups
return
# find old backups
backup_dir = config.get_backups_directory(self.impl, self.working_dir)
if not os.path.exists(backup_dir):
return
backups = os.listdir( backup_dir )
for backup_name in backups:
if backup_name in [".", ".."]:
continue
backup_path = os.path.join(backup_dir, backup_name)
backup_block = None
try:
backup_block = int(backup_path.split(".")[-1])
except:
# not a backup file
log.info("Skipping non-backup '%s'" % backup_path)
if not backup_path.endswith( ".bak.%s" % backup_block ):
# not a backup file
log.info("Skipping non-backup '%s'" % backup_path)
continue
if backup_block + self.backup_max_age < block_id:
# dead
log.info("Removing old backup '%s'" % backup_path)
try:
os.unlink(backup_path)
except:
pass | 0.010127 |
def t_backquote(self, t):
r'`'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('backquote') | 0.012658 |
def private_config_content(self, private_config):
"""
Update the private config
:param private_config: content of the private configuration file
"""
try:
private_config_path = os.path.join(self.working_dir, "private-config.cfg")
if private_config is None:
private_config = ''
# We disallow erasing the private config file
if len(private_config) == 0 and os.path.exists(private_config_path):
return
with open(private_config_path, 'w+', encoding='utf-8') as f:
if len(private_config) == 0:
f.write('')
else:
private_config = private_config.replace("%h", self._name)
f.write(private_config)
except OSError as e:
raise IOUError("Can't write private-config file '{}': {}".format(private_config_path, e)) | 0.00528 |
def _code_line(self, line):
"""Add a code line."""
assert self._containers
container = self._containers[-1]
# Handle extra spaces.
text = line
while text:
if text.startswith(' '):
r = re.match(r'(^ +)', text)
n = len(r.group(1))
container.addElement(S(c=n))
text = text[n:]
elif ' ' in text:
assert not text.startswith(' ')
i = text.index(' ')
container.addElement(Span(text=text[:i]))
text = text[i:]
else:
container.addElement(Span(text=text))
text = '' | 0.002849 |
def shell_exec(command, **kwargs): # from gitapi.py
"""Excecutes the given command silently.
"""
proc = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, **kwargs)
out, err = [x.decode("utf-8") for x in proc.communicate()]
return {'out': out, 'err': err, 'code': proc.returncode} | 0.023649 |
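A minimal usage sketch, assuming a POSIX-like system with an `echo` binary on PATH; the imports below cover the names the snippet itself relies on (`Popen`, `PIPE`, `shlex`) if everything is pasted into one module.

```python
from subprocess import PIPE, Popen
import shlex

result = shell_exec('echo hello')
print(result['code'], result['out'].strip())  # -> 0 hello
```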
def create(self, pools):
"""
Method to create pools
:param pools: List containing the pools to be created in the database
:return: None
"""
data = {'server_pools': pools}
return super(ApiPool, self).post('api/v3/pool/', data) | 0.007018 |
def assigned(self, user, include=None):
"""
Retrieve the assigned tickets for this user.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param user: User object or id
"""
return self._query_zendesk(self.endpoint.assigned, 'ticket', id=user, include=include) | 0.009804 |
def _msg_name(self, container, max_width):
"""Build the container name."""
name = container['name']
if len(name) > max_width:
name = '_' + name[-max_width + 1:]
else:
name = name[:max_width]
return ' {:{width}}'.format(name, width=max_width) | 0.006557 |
def click_event(event):
"""On click, bring the cell under the cursor to Life"""
grid_x_coord = int(divmod(event.x, cell_size)[0])
grid_y_coord = int(divmod(event.y, cell_size)[0])
world[grid_x_coord][grid_y_coord].value = True
color = world[grid_x_coord][grid_y_coord].color_alive.get_as_hex()
canvas.itemconfig(canvas_grid[grid_x_coord][grid_y_coord], fill=color) | 0.002732 |
def minimizeSPSA(func, x0, args=(), bounds=None, niter=100, paired=True,
a=1.0, alpha=0.602, c=1.0, gamma=0.101,
disp=False, callback=None):
"""
Minimization of an objective function by a simultaneous perturbation
stochastic approximation algorithm.
This algorithm approximates the gradient of the function by finite differences
along stochastic directions Deltak. The elements of Deltak are drawn from
+- 1 with probability one half. The gradient is approximated from the
symmetric difference f(xk + ck*Deltak) - f(xk - ck*Deltak), where the evaluation
step size ck is scaled according ck = c/(k+1)**gamma.
The algorithm takes a step of size ak = a/(0.01*niter+k+1)**alpha along the
negative gradient.
See Spall, IEEE, 1998, 34, 817-823 for guidelines about how to choose the algorithm's
parameters (a, alpha, c, gamma).
Parameters
----------
func: callable
objective function to be minimized:
called as `func(x, *args)`,
if `paired=True`, then called with keyword argument `seed` additionally
x0: array-like
initial guess for parameters
args: tuple
extra arguments to be supplied to func
bounds: array-like
bounds on the variables
niter: int
number of iterations after which to terminate the algorithm
paired: boolean
calculate gradient for same random seeds
a: float
scaling parameter for step size
alpha: float
scaling exponent for step size
c: float
scaling parameter for evaluation step size
gamma: float
scaling exponent for evaluation step size
disp: boolean
whether to output status updates during the optimization
callback: callable
called after each iteration, as callback(xk), where xk are the current parameters
Returns
-------
`scipy.optimize.OptimizeResult` object
"""
A = 0.01 * niter
if bounds is not None:
bounds = np.asarray(bounds)
project = lambda x: np.clip(x, bounds[:, 0], bounds[:, 1])
if args is not None:
# freeze function arguments
def funcf(x, **kwargs):
return func(x, *args, **kwargs)
N = len(x0)
x = x0
for k in range(niter):
ak = a/(k+1.0+A)**alpha
ck = c/(k+1.0)**gamma
Deltak = np.random.choice([-1, 1], size=N)
fkwargs = dict()
if paired:
fkwargs['seed'] = np.random.randint(0, np.iinfo(np.uint32).max)
if bounds is None:
grad = (funcf(x + ck*Deltak, **fkwargs) - funcf(x - ck*Deltak, **fkwargs)) / (2*ck*Deltak)
x -= ak*grad
else:
# ensure evaluation points are feasible
xplus = project(x + ck*Deltak)
xminus = project(x - ck*Deltak)
grad = (funcf(xplus, **fkwargs) - funcf(xminus, **fkwargs)) / (xplus-xminus)
x = project(x - ak*grad)
# print 100 status updates if disp=True
if disp and (k % (niter//100)) == 0:
print(x)
if callback is not None:
callback(x)
message = 'terminated after reaching max number of iterations'
return OptimizeResult(fun=funcf(x), x=x, nit=niter, nfev=2*niter, message=message, success=True) | 0.003926 |
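A minimal usage sketch minimizing a two-dimensional quadratic with the SPSA routine above; the step-size constants are arbitrary choices, and `OptimizeResult` is assumed to come from `scipy.optimize` as in the snippet.

```python
import numpy as np

def bowl(x):
    # Simple convex objective with its minimum at the origin.
    return float(np.sum(x ** 2))

res = minimizeSPSA(bowl, x0=np.array([2.0, -1.5]),
                   niter=500, paired=False, a=0.2, c=0.1)
print(res.x)    # expected to approach [0, 0]
print(res.fun)  # expected to be close to 0
```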
def _process_return_multi_z_attr(self, data, attr_names, var_names, sub_num_elems):
'''process and attach data from fortran_cdf.get_multi_*'''
# process data
for i, (attr_name, var_name, num_e) in enumerate(zip(attr_names, var_names, sub_num_elems)):
if var_name not in self.meta.keys():
self.meta[var_name] = {}
if num_e == 1:
self.meta[var_name][attr_name] = data[i, 0]
else:
if data[i].dtype == '|S1':
self.meta[var_name][attr_name] = ''.join(data[i, 0:num_e].astype('U')).rstrip()
else:
self.meta[var_name][attr_name] = data[i, 0:num_e] | 0.007092 |
def _get_bradcrack_data(bravais):
r"""Read Bradley--Cracknell k-points path from data file
Args:
bravais (str): Lattice code including orientation e.g. 'trig_p_c'
Returns:
dict: kpoint path and special point locations, formatted as e.g.::
{'kpoints': {'\Gamma': [0., 0., 0.], 'X': [0., 0.5, 0.], ...},
'path': [['\Gamma', 'X', ..., 'P'], ['H', 'N', ...]]}
"""
json_file = pkg_resources.resource_filename(__name__, 'bradcrack.json')
with open(json_file, 'r') as f:
bradcrack_data = load_json(f)
return bradcrack_data[bravais] | 0.003063 |
def price_name_stacks(name, namespace, block_height):
"""
Get a name's price in Stacks, regardless of whether or not
the namespace it was created in was created before Stacks
existed. This is because any name can be purchased with
Stacks. If the namespace price curve was meant for BTC
(per its version bits), then the BTC price will be converted
to the Stacks price.
Returns an integer (microStacks)
"""
if namespace['version'] in [NAMESPACE_VERSION_PAY_WITH_STACKS]:
# price curve already reflects Stacks prices
return price_name(name, namespace, block_height)
else:
# price curve reflects Bitcoin prices.
# convert to Stacks prices with (MICROSTACKS_PER_SATOSHI_NUM / MICROSTACKS_PER_SATOSHI_DEN) ratio
btc_price = price_name(name, namespace, block_height)
btc_price = int(btc_price)
        return (btc_price * MICROSTACKS_PER_SATOSHI_NUM) // MICROSTACKS_PER_SATOSHI_DEN | 0.004124
def lists(self):
""" :class:`Lists feed <pypump.models.feed.Lists>` with all lists
owned by the person.
Example:
>>> for list in pump.me.lists:
... print(list)
...
Acquaintances
Family
Coworkers
Friends
"""
if self._lists is None:
self._lists = Lists(self.links['lists'], pypump=self._pump)
return self._lists | 0.004367 |
def partition_dumps(self):
"""Yeild a set of manifest object that parition the dumps.
Simply adds resources/files to a manifest until their are either the
the correct number of files or the size limit is exceeded, then yields
that manifest.
"""
manifest = self.manifest_class()
manifest_size = 0
manifest_files = 0
for resource in self.resources:
manifest.add(resource)
manifest_size += resource.length
manifest_files += 1
if (manifest_size >= self.max_size or
manifest_files >= self.max_files):
yield(manifest)
# Need to start a new manifest
manifest = self.manifest_class()
manifest_size = 0
manifest_files = 0
if (manifest_files > 0):
yield(manifest) | 0.002237 |
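The same size-and-count chunking idea in isolation (a sketch over hypothetical (length, name) pairs, not the manifest class above):
def partition(resources, max_size, max_files):
    """Yield lists of names whose accumulated length and count respect the limits."""
    batch, batch_size = [], 0
    for length, name in resources:
        batch.append(name)
        batch_size += length
        if batch_size >= max_size or len(batch) >= max_files:
            yield batch
            batch, batch_size = [], 0
    if batch:
        yield batch

# list(partition([(40, 'a'), (70, 'b'), (10, 'c')], max_size=100, max_files=2))
# -> [['a', 'b'], ['c']]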
def as_nonlinear(self, params=None):
"""Return a `Model` equivalent to this object. The nonlinear solver is less
efficient, but lets you freeze parameters, compute uncertainties, etc.
If the `params` argument is provided, solve() will be called on the
returned object with those parameters. If it is `None` and this object
    has parameters in `self.params`, those will be used. Otherwise, solve()
will not be called on the returned object.
"""
if params is None:
params = self.params
nlm = Model(None, self.data, self.invsigma)
nlm.set_func(lambda p, x: npoly.polyval(x, p),
self.pnames,
args=(self.x,))
if params is not None:
nlm.solve(params)
return nlm | 0.003667 |
def get(cls):
"""
Use the masking function (``sigprocmask(2)`` or ``pthread_sigmask(3)``)
to obtain the mask of blocked signals
:returns:
A fresh :class:`sigprocmask` object.
The returned object behaves as it was constructed with the list of
currently blocked signals, ``setmask=False`` and as if the
:meth:`block()` was immediately called.
            That is, calling :meth:`unblock()` will cause those signals not to
be blocked anymore while calling :meth:`block()` will re-block them (if
they were unblocked after this method returns).
"""
mask = sigset_t()
sigemptyset(mask)
cls._do_mask(0, None, mask)
signals = []
for sig_num in range(1, NSIG):
if sigismember(mask, sig_num):
signals.append(sig_num)
self = cls(signals)
self._is_active = True
self._old_mask = mask
return self | 0.002043 |
def get_fw_version():
"""
Try to get version of the framework. First try pkg_resources.require, if that fails
read from setup.py
:return: Version as str
"""
version = 'unknown'
try:
pkg = require(get_fw_name())[0]
except DistributionNotFound:
# Icetea is not installed. try to read version string from setup.py
try:
setup_path = os.path.abspath(os.path.dirname(__file__)+'/../..')
with open(os.path.join(setup_path, 'setup.py')) as setup_file:
lines = setup_file.readlines()
for line in lines:
match = re.search(r"VERSION = \"([\S]{5,})\"", line)
if match:
version = match.group(1)
break
except Exception: # pylint: disable=broad-except
pass
else:
version = "-rc".join(pkg.version.split("rc"))
return version | 0.002116 |
def get_decoder(encoding, *args, **kwargs):
"""
Returns a L{codec.Decoder} capable of decoding AMF[C{encoding}] streams.
@raise ValueError: Unknown C{encoding}.
"""
def _get_decoder_class():
if encoding == AMF0:
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
return amf0.Decoder
elif encoding == AMF3:
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
return amf3.Decoder
raise ValueError("Unknown encoding %r" % (encoding,))
return _get_decoder_class()(*args, **kwargs) | 0.001416 |
def grab_earliest(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]:
"""Grab the earliest data from the buffer, blocking until one is available."""
timeout = timeout if timeout is not None else 10.0
with self.__buffer_lock:
if len(self.__buffer) == 0:
done_event = threading.Event()
self.__done_events.append(done_event)
self.__buffer_lock.release()
done = done_event.wait(timeout)
self.__buffer_lock.acquire()
if not done:
raise Exception("Could not grab latest.")
return self.__buffer.pop(0) | 0.008798 |
def set_ipcsem_params(self, ftok=None, persistent=None):
"""Sets ipcsem lock engine params.
:param str|unicode ftok: Set the ipcsem key via ftok() for avoiding duplicates.
:param bool persistent: Do not remove ipcsem's on shutdown.
"""
self._set('ftok', ftok)
self._set('persistent-ipcsem', persistent, cast=bool)
return self._section | 0.007614 |
def cycle_canceling(self, display):
'''
API:
cycle_canceling(self, display)
Description:
Solves minimum cost feasible flow problem using cycle canceling
algorithm. Returns True when an optimal solution is found, returns
False otherwise. 'flow' attribute values of arcs should be
considered as junk when returned False.
Input:
display: Display method.
Pre:
(1) Arcs should have 'capacity' and 'cost' attribute.
(2) Nodes should have 'demand' attribute, this value should be
positive if the node is a supply node, negative if it is demand
node and 0 if it is transhipment node.
(3) graph should not have node 's' and 't'.
Post:
Changes 'flow' attributes of arcs.
Return:
Returns True when an optimal solution is found, returns False
otherwise.
'''
# find a feasible solution to flow problem
if not self.find_feasible_flow():
return False
# create residual graph
residual_g = self.create_residual_graph()
# identify a negative cycle in residual graph
ncycle = residual_g.get_negative_cycle()
# loop while residual graph has a negative cycle
while ncycle is not None:
# find capacity of cycle
cap = residual_g.find_cycle_capacity(ncycle)
# augment capacity amount along the cycle
self.augment_cycle(cap, ncycle)
# create residual graph
residual_g = self.create_residual_graph()
# identify next negative cycle
ncycle = residual_g.get_negative_cycle()
return True | 0.001131 |
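For illustration only (this is not the library's get_negative_cycle), the negative-cycle detection step that cycle canceling depends on can be sketched as Bellman-Ford over a residual graph given as a dict of arc costs:
def find_negative_cycle(nodes, arc_costs):
    """Return a list of nodes on a negative-cost cycle, or None if none exists.
    arc_costs maps (u, v) arcs to residual costs."""
    dist = {v: 0 for v in nodes}   # distance 0 everywhere = a virtual source into every node
    pred = {v: None for v in nodes}
    last_relaxed = None
    for _ in range(len(nodes)):    # any relaxation in the n-th pass proves a negative cycle
        last_relaxed = None
        for (u, v), cost in arc_costs.items():
            if dist[u] + cost < dist[v]:
                dist[v] = dist[u] + cost
                pred[v] = u
                last_relaxed = v
    if last_relaxed is None:
        return None
    # follow predecessors until a node repeats; that node lies on the cycle
    seen, v = set(), last_relaxed
    while v not in seen:
        seen.add(v)
        v = pred[v]
    cycle, u = [v], pred[v]
    while u != v:
        cycle.append(u)
        u = pred[u]
    return cycle[::-1]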
def get_path_list(args, nni_config, trial_content, temp_nni_path):
'''get path list according to different platform'''
path_list, host_list = parse_log_path(args, trial_content)
platform = nni_config.get_config('experimentConfig').get('trainingServicePlatform')
if platform == 'local':
print_normal('Log path: %s' % ' '.join(path_list))
return path_list
elif platform == 'remote':
path_list = copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path)
print_normal('Log path: %s' % ' '.join(path_list))
return path_list
else:
print_error('Not supported platform!')
exit(1) | 0.00438 |
def _get_lottery_detail_by_id(self, id):
"""
        Generate the history information for the given lottery type.
        Baidu's detail pages come in two layouts, which need to be handled separately.
"""
        header = '编号 期号 开奖日期 开奖号码'.split()  # columns: index, draw number, draw date, winning numbers
pt = PrettyTable()
pt._set_field_names(header)
url = QUERY_DETAIL_URL.format(id=id)
import requests
content = requests.get(url).text
d = pq(content)
if d('table.historylist'):
            # print the lottery type
info = d('div.historyHd1 h2').text()
print(info)
            # print the results table
rows = d('table.historylist>tbody>tr')
for idx, row in enumerate(rows):
i = pq(row)
qh = i('td:eq(0)').text().strip()
kjrq = i('td:eq(1)').text().strip()
hm_r = colored.red(i('td:eq(2) td.redBalls').text().strip())
hm_g = colored.green(i('td:eq(2) td.blueBalls').text().strip())
kjhm = ' '.join([hm_r, hm_g])
item = [idx + 1, qh, kjrq, kjhm]
pt.add_row(item)
print(pt)
elif d('table#draw_list'):
            # print the lottery type
info = d('div.cpinfo>div.title').text()
print(info)
            # print the results table
rows = d('table#draw_list>tbody>tr')
for idx, row in enumerate(rows):
i = pq(row)
qh = i('td.td2').text().strip()
kjrq = i('td.td1').text().strip()
hm_r = colored.red(i('td.td3 span.ball_1').text().strip())
hm_g = colored.green(i('td.td3 span.ball_2').text().strip())
kjhm = ' '.join([hm_r, hm_g])
item = [idx + 1, qh, kjrq, kjhm]
pt.add_row(item)
print(pt)
        else:
            # unrecognized page layout; the printed message asks the user to contact the author
            print('请联系作者') | 0.001144
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
# SA works with real parameter values and does not need scaling
tuning_options["scaling"] = False
args = (kernel_options, tuning_options, runner, results, cache)
tune_params = tuning_options.tune_params
# optimization parameters
T = 1.0
T_min = 0.001
alpha = 0.9
niter = 20
# generate random starting point and evaluate cost
pos = []
for i, _ in enumerate(tune_params.keys()):
pos.append(random_val(i, tune_params))
old_cost = _cost_func(pos, *args)
    c = 0  # pass counter (always tracked; only printed when verbose)
# main optimization loop
while T > T_min:
if tuning_options.verbose:
print("iteration: ", c, "T", T, "cost: ", old_cost)
c += 1
for i in range(niter):
new_pos = neighbor(pos, tune_params)
new_cost = _cost_func(new_pos, *args)
ap = acceptance_prob(old_cost, new_cost, T)
r = random.random()
if ap > r:
if tuning_options.verbose:
print("new position accepted", new_pos, new_cost, 'old:', pos, old_cost, 'ap', ap, 'r', r, 'T', T)
pos = new_pos
old_cost = new_cost
T = T * alpha
return results, runner.dev.get_environment() | 0.001365 |
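Stripped of the tuner-specific plumbing (runner, cache, acceptance_prob), the same annealing loop looks like the toy sketch below; the Metropolis rule exp((old - new) / T) is the usual choice of acceptance probability, though kernel_tuner's own helper may differ.
import math
import random

def anneal(cost, start, neighbor, T=1.0, T_min=0.001, alpha=0.9, niter=20):
    x, fx = start, cost(start)
    while T > T_min:
        for _ in range(niter):
            y = neighbor(x)
            fy = cost(y)
            # always accept improvements; accept worse moves with probability exp((fx - fy) / T)
            if fy < fx or random.random() < math.exp((fx - fy) / T):
                x, fx = y, fy
        T *= alpha
    return x, fx

# toy run: minimize (v - 3)**2 over integers with +/-1 moves
best, best_cost = anneal(lambda v: (v - 3) ** 2, start=0,
                         neighbor=lambda v: v + random.choice([-1, 1]))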
def attribute_is_visible(self, permission):
""" Checks privacy options to see if an attribute is visible to public
"""
try:
parent = getattr(self, "parent_{}".format(permission))
student = getattr(self, "self_{}".format(permission))
return (parent and student) or (self.is_http_request_sender() or self._current_user_override())
except Exception:
logger.error("Could not retrieve permissions for {}".format(permission)) | 0.008016 |
def search(self, evaluation, data):
"""
Performs the search and returns the indices of the selected attributes.
:param evaluation: the evaluation algorithm to use
:type evaluation: ASEvaluation
:param data: the data to use
:type data: Instances
:return: the selected attributes (0-based indices)
:rtype: ndarray
"""
array = javabridge.call(
self.jobject, "search", "(Lweka/attributeSelection/ASEvaluation;Lweka/core/Instances;)[I",
evaluation.jobject, data.jobject)
if array is None:
return None
else:
            return javabridge.get_env().get_int_array_elements(array) | 0.004304
def get_worksheets_section(self):
""" Returns the section dictionary related with Worksheets,
that contains some informative panels (like
WS to be verified, WS with results pending, etc.)
"""
out = []
bc = getToolByName(self.context, CATALOG_WORKSHEET_LISTING)
query = {'portal_type': "Worksheet", }
# Check if dashboard_cookie contains any values to query
# elements by
query = self._update_criteria_with_filters(query, 'worksheets')
# Active Worksheets (all)
total = self.search_count(query, bc.id)
# Open worksheets
name = _('Results pending')
desc = _('Results pending')
purl = 'worksheets?list_review_state=open'
query['review_state'] = ['open', 'attachment_due']
out.append(self._getStatistics(name, desc, purl, bc, query, total))
# Worksheets to be verified
name = _('To be verified')
desc = _('To be verified')
purl = 'worksheets?list_review_state=to_be_verified'
query['review_state'] = ['to_be_verified', ]
out.append(self._getStatistics(name, desc, purl, bc, query, total))
# Worksheets verified
name = _('Verified')
desc = _('Verified')
purl = 'worksheets?list_review_state=verified'
query['review_state'] = ['verified', ]
out.append(self._getStatistics(name, desc, purl, bc, query, total))
# Chart with the evolution of WSs over a period, grouped by
# periodicity
outevo = self.fill_dates_evo(bc, query)
out.append({'type': 'bar-chart-panel',
'name': _('Evolution of Worksheets'),
'class': 'informative',
'description': _('Evolution of Worksheets'),
'data': json.dumps(outevo),
'datacolors': json.dumps(self.get_colors_palette())})
return {'id': 'worksheets',
'title': _('Worksheets'),
'panels': out} | 0.000964 |
def get(key, default='', merge=False, delimiter=DEFAULT_TARGET_DELIM):
'''
.. versionadded:: 0.14
Attempt to retrieve the named value from pillar, if the named value is not
available return the passed default. The default return is an empty string.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict in pillar looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
merge
Specify whether or not the retrieved values should be recursively
merged into the passed default.
.. versionadded:: 2015.5.0
delimiter
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' pillar.get pkg:apache
'''
if merge:
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, {}, delimiter)
if isinstance(ret, collections.Mapping) and \
isinstance(default, collections.Mapping):
return salt.utils.dictupdate.update(default, ret)
return salt.utils.data.traverse_dict_and_list(
__pillar__,
key,
default,
delimiter) | 0.001356 |
def _get_attachment_data(self, id, filename):
"""
Retrieve the contents of a specific attachment (identified by filename).
"""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
return uri, {}, 'get', None, None, False | 0.010753 |
def raster_to_shape(raster):
"""Take a raster and return a polygon representing the outer edge."""
left = raster.bounds.left
right = raster.bounds.right
top = raster.bounds.top
bottom = raster.bounds.bottom
top_left = (left, top)
top_right = (right, top)
bottom_left = (left, bottom)
bottom_right = (right, bottom)
return Polygon((
top_left, top_right, bottom_right, bottom_left, top_left,
)) | 0.002242 |
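A usage sketch, assuming the raster comes from rasterio (any object exposing .bounds with left/right/top/bottom attributes works) and that 'scene.tif' is a placeholder path:
import rasterio

with rasterio.open('scene.tif') as src:
    outline = raster_to_shape(src)
print(outline.wkt)  # POLYGON ((...)) tracing the raster extent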
def _split_field_to_single_value(field):
"""Convert number field values split by a '/' to a single number value."""
split_field = re.match(r'(\d+)/\d+', field)
    return split_field.group(1) if split_field else field | 0.0199
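With the guard above, matching fields are reduced to their numerator and anything else passes through unchanged, e.g.:
_split_field_to_single_value('3/12')  # -> '3'
_split_field_to_single_value('7')     # -> '7' (no '/', returned as-is)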
def summarization(self, summarization):
"""Sets the summarization of this Chart.
Summarization strategy for the chart. MEAN is default # noqa: E501
:param summarization: The summarization of this Chart. # noqa: E501
:type: str
"""
allowed_values = ["MEAN", "MEDIAN", "MIN", "MAX", "SUM", "COUNT", "LAST", "FIRST"] # noqa: E501
if summarization not in allowed_values:
raise ValueError(
"Invalid value for `summarization` ({0}), must be one of {1}" # noqa: E501
.format(summarization, allowed_values)
)
self._summarization = summarization | 0.003012 |
def run_from_argv(self, argv):
"""
Called by the system when executing the command from the command line.
This should not be overridden.
:param argv: Arguments from command line
"""
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
args = cmd_options.pop('args', ())
self.handle(*args, **cmd_options) | 0.004405 |
def subproc_map(self, f, items):
"""Map function `f` over `items` in subprocesses and return the result.
:API: public
:param f: A multiproc-friendly (importable) work function.
:param items: A iterable of pickleable arguments to f.
"""
try:
# Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
# See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
# Instead, we map_async(...), wait *with a timeout* until ready, then .get()
# NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
res = SubprocPool.foreground().map_async(f, items)
while not res.ready():
res.wait(60) # Repeatedly wait for up to a minute.
if not res.ready():
self.log.debug('subproc_map result still not ready...')
return res.get()
except KeyboardInterrupt:
SubprocPool.shutdown(True)
raise | 0.015021 |
def updateCache(self, service, url, new_data, new_data_dt):
"""
:param new_data: a string representation of the data
:param new_data_dt: a timezone aware datetime object giving
the timestamp of the new_data
:raise MemcachedException: if update failed
"""
key = self._get_key(service, url)
# clear existing data
try:
value = self.client.get(key)
if value:
data = pickle.loads(value, encoding="utf8")
if "time_stamp" in data:
cached_data_dt = parse(data["time_stamp"])
if new_data_dt > cached_data_dt:
self.client.delete(key)
# may raise MemcachedException
logger.info(
"IN cache (key: {}), older DELETE".format(key))
else:
logger.info(
"IN cache (key: {}), newer KEEP".format(key))
return
else:
logger.info("NOT IN cache (key: {})".format(key))
except MemcachedException as ex:
logger.error(
"Clear existing data (key: {}) ==> {}".format(key, str(ex)))
return
# store new value in cache
cdata, time_to_store = self._make_cache_data(
service, url, new_data, {}, 200, new_data_dt)
self.client.set(key, cdata, time=time_to_store)
# may raise MemcachedException
logger.info(
"MemCached SET (key {}) for {:d} seconds".format(
key, time_to_store)) | 0.001196 |
def process_module(self, node):
"""Process the astroid node stream."""
if self.config.file_header:
            if sys.version_info[0] < 3:
                pattern = re.compile(
                    r'\A' + self.config.file_header, re.LOCALE | re.MULTILINE)
            else:
                # The use of re.LOCALE is discouraged in python 3
                pattern = re.compile(
                    r'\A' + self.config.file_header, re.MULTILINE)
content = None
with node.stream() as stream:
# Explicit decoding required by python 3
content = stream.read().decode('utf-8')
matches = pattern.findall(content)
if len(matches) != 1:
self.add_message('invalid-file-header', 1,
args=self.config.file_header) | 0.004728 |
def handle_starttag(self, tag, attrs):
"""Return representation of html start tag and attributes."""
if tag in self.mathml_elements:
final_attr = ""
for key, value in attrs:
final_attr += ' {0}="{1}"'.format(key, value)
self.fed.append("<{0}{1}>".format(tag, final_attr)) | 0.0059 |
def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10,
numInnerItermax=200, stopThr=1e-9, verbose=False, log=False):
"""
Solve the general regularized OT problem with the generalized conditional gradient
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg1\cdot\Omega(\gamma) + reg2\cdot f(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- :math:`f` is the regularization term ( and df is its gradient)
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the generalized conditional gradient as discussed in [5,7]_
Parameters
----------
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,)
samples in the target domain
M : np.ndarray (ns,nt)
loss matrix
reg1 : float
Entropic Regularization term >0
reg2 : float
Second Regularization term >0
G0 : np.ndarray (ns,nt), optional
initial guess (default is indep joint density)
numItermax : int, optional
Max number of iterations
numInnerItermax : int, optional
Max number of iterations of Sinkhorn
stopThr : float, optional
        Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1
.. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567.
See Also
--------
ot.optim.cg : conditional gradient
"""
loop = 1
if log:
log = {'loss': []}
if G0 is None:
G = np.outer(a, b)
else:
G = G0
def cost(G):
return np.sum(M * G) + reg1 * np.sum(G * np.log(G)) + reg2 * f(G)
f_val = cost(G)
if log:
log['loss'].append(f_val)
it = 0
if verbose:
print('{:5s}|{:12s}|{:8s}'.format(
'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32)
print('{:5d}|{:8e}|{:8e}'.format(it, f_val, 0))
while loop:
it += 1
old_fval = f_val
# problem linearization
Mi = M + reg2 * df(G)
# solve linear program with Sinkhorn
# Gc = sinkhorn_stabilized(a,b, Mi, reg1, numItermax = numInnerItermax)
Gc = sinkhorn(a, b, Mi, reg1, numItermax=numInnerItermax)
deltaG = Gc - G
# line search
        dcost = Mi + reg1 * (1 + np.log(G))  # gradient of cost(G): M + reg2*df(G) + reg1*(1 + log(G))
alpha, fc, f_val = line_search_armijo(cost, G, deltaG, dcost, f_val)
G = G + alpha * deltaG
# test convergence
if it >= numItermax:
loop = 0
delta_fval = (f_val - old_fval) / abs(f_val)
if abs(delta_fval) < stopThr:
loop = 0
if log:
log['loss'].append(f_val)
if verbose:
if it % 20 == 0:
print('{:5s}|{:12s}|{:8s}'.format(
'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32)
print('{:5d}|{:8e}|{:8e}'.format(it, f_val, delta_fval))
if log:
return G, log
else:
return G | 0.007006 |
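A usage sketch in the style of the POT examples (sizes and regularization strengths are illustrative; sinkhorn and line_search_armijo are assumed to be importable from the same module as gcg): the quadratic regularizer f(G) = 0.5 * ||G||_F^2 with gradient df(G) = G is a common choice for the second term.
import numpy as np

def f(G):
    return 0.5 * np.sum(G ** 2)   # squared-Frobenius regularizer

def df(G):
    return G                      # its gradient

n = 20
a = np.ones(n) / n                # uniform source weights
b = np.ones(n) / n                # uniform target weights
x = np.arange(n, dtype=float).reshape(-1, 1)
M = (x - x.T) ** 2                # squared-distance cost matrix
G = gcg(a, b, M, reg1=1e-1, reg2=1e-1, f=f, df=df)
print(G.sum(axis=1))              # each row should sum to (approximately) a_i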
def is_error_retryable(request, exc):
"""
Return ``True`` if the exception is recognized as :term:`retryable error`.
This will return ``False`` if the request is on its last attempt.
This will return ``False`` if ``pyramid_retry`` is inactive for the
request.
"""
if is_last_attempt(request):
return False
return (
isinstance(exc, RetryableException)
or IRetryableError.providedBy(exc)
) | 0.002222 |
def _get_converter(self, convert_to=None):
'''see convert and save. This is a helper function that returns
the proper conversion function, but doesn't call it. We do this
so that in the case of convert, we do the conversion and return
a string. In the case of save, we save the recipe to file for the
user.
Parameters
==========
        convert_to: a string, either docker or singularity, if a different conversion type is desired
Returns
=======
converter: the function to do the conversion
'''
conversion = self._get_conversion_type(convert_to)
# Perform conversion
if conversion == "singularity":
return self.docker2singularity
return self.singularity2docker | 0.004994 |
def read_validate_params(self, request):
"""
Reads and validates data in an incoming request as required by
the Authorization Request of the Authorization Code Grant and the
Implicit Grant.
"""
self.client = self.client_authenticator.by_identifier(request)
response_type = request.get_param("response_type")
if self.client.response_type_supported(response_type) is False:
raise OAuthInvalidError(error="unauthorized_client")
self.state = request.get_param("state")
self.scope_handler.parse(request, "query")
return True | 0.003205 |
def update(self, other, copy=True, *args, **kwargs):
"""Update this element related to other element.
:param other: same type than this.
:param bool copy: copy other before update attributes.
:param tuple args: copy args.
:param dict kwargs: copy kwargs.
:return: this"""
if other: # dirty hack for python2.6
if isinstance(other, self.__class__):
if copy:
other = other.copy(*args, **kwargs)
for slot in other.__slots__:
attr = getattr(other, slot)
if attr is not None:
setattr(self, slot, attr)
else:
raise TypeError(
'Wrong element to update with {0}: {1}'.format(self, other)
)
return self | 0.002336 |
def __query_safebrowsing(self, search_value, search_type):
"""
The actual query to safebrowsing api
:param search_value: value to search for
:type search_value: str
:param search_type: 'url' or 'ip'
:type search_type: str
:return: Results
:rtype: str
"""
return json.loads(
self.session.post(
self.url,
json=self.__prepare_body(
search_value=search_value,
search_type=search_type
)
).text
) | 0.004754 |