| content | sha1 | id |
|---|---|---|
| string, 35–762k chars | string, 40 chars | int64, 0–3.66M |
import csv
import numpy as np
def load_testingData(tempTrainingVectors, tempTestingVectors):
    """
    TODO: Merge load_testingData() and load_trainingData() functions
    This reads file DSL-StrongPasswordData.csv and returns the testing data in
    an ndarray of shape (tempTestingVectors*noOfTotalClasses, noOfFeatures) and a
    target ndarray of shape (tempTestingVectors*noOfTotalClasses,).
    Relies on the module-level globals datasetPath, noOfFeatures,
    noOfTotalClasses and noOfTotalVectors.
    """
    dataset = np.empty([0, noOfFeatures])
    target = np.empty(0)
    file = open(datasetPath)
    reader = csv.reader(file)
    next(reader)  # Skip the header row
    for i in range(noOfTotalClasses):
        # for i in range(noOfTotalClasses+1): # Skip s002
        # if i==0:
        #     for j in range(noOfTotalVectors):
        #         tempData = next(reader) # Discard one vector
        #     continue
        for j in range(tempTrainingVectors):  # Discard training vectors now
            tempData = next(reader)  # Discard one vector
        for j in range(tempTestingVectors):
            tempData = next(reader)  # Read one vector
            currentSubject = tempData[0]  # Save subject's name
            for k in range(3):  # Discard first 3 values
                del tempData[0]
            tempData = list(map(float, tempData))
            tempData = np.array(tempData, ndmin=2)
            dataset = np.append(dataset, tempData, axis=0)
            target = np.append(target, [currentSubject], axis=0)
        # Discard the rest of the unused vectors now
        for j in range(noOfTotalVectors - tempTrainingVectors - tempTestingVectors):
            tempData = next(reader)  # Discard one vector
return dataset,target | bffd94e65017c1ab1ab830f1af4d6256464a5d3a | 4,300 |
from asyncio import CancelledError
async def anext(*args):
    """Retrieve the next item from the async generator by calling its __anext__() method.
    If default is given, it is returned if the iterator is exhausted,
    otherwise StopAsyncIteration is raised.
    """
    if len(args) < 1:
        raise TypeError(
            f"anext expected at least 1 argument, got {len(args)}")
aiterable, default, has_default = args[0], None, False
if len(args) > 2:
raise TypeError(f"anext expected at most 2 arguments, got {len(args)}")
if len(args) == 2:
default = args[1]
has_default = True
try:
return await aiterable.__anext__()
except (StopAsyncIteration, CancelledError) as exc:
if has_default:
return default
raise StopAsyncIteration() from exc | 60f71e2277501b1c274d691cef108937a2492147 | 4,301 |
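A minimal usage sketch for anext() above; the generator and runner names are hypothetical, assuming Python 3.7+ and asyncio:
import asyncio
async def _gen():
    yield 1
async def _demo():
    g = _gen()
    print(await anext(g))           # 1
    print(await anext(g, "done"))   # "done" once the generator is exhausted
asyncio.run(_demo())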
def __prepare_arguments_for_d3_data(db_arguments, edge_type):
    """
    :param db_arguments: iterable of Argument rows to convert into d3 nodes/edges
    :param edge_type: label attached to every created edge
    :return: tuple of (all_ids, nodes, edges, extras)
    """
    all_ids = []
    nodes = []
    edges = []
    extras = {}
    LOG.debug("Enter private function to prepare arguments for d3")
    # for each argument edges will be added as well as the premises
    for argument in db_arguments:
        counter = 1
        # we have an argument with either:
        # 1) one premise and no undercut done on this argument, or
        # 2) at least two premises, one conclusion, or an undercut done on this argument
db_premises = DBDiscussionSession.query(Premise).filter(Premise.premisegroup_uid == argument.premisegroup_uid,
Premise.is_disabled == False).all()
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument.uid).all()
# target of the edge (case 1) or last edge (case 2)
target = 'argument_' + str(argument.argument_uid)
if argument.conclusion_uid is not None:
target = 'statement_' + str(argument.conclusion_uid)
if len(db_premises) == 1 and len(db_undercuts) == 0:
__add_edge_to_dict(edges, argument, counter, db_premises[0], target, edge_type)
else:
__add_edge_and_node_to_dict(edges, nodes, all_ids, argument, counter, db_premises, target, edge_type)
return all_ids, nodes, edges, extras | 70bfc5a8196ce831f21935f0d429180aea7019a5 | 4,302 |
from typing import List
def get_service_gateway(client: VirtualNetworkClient = None,
                        compartment_id: str = None,
                        vcn_id: str = None) -> List[ServiceGateway]:
"""
Returns a complete, unfiltered list of Service Gateways of a vcn in the
compartment.
"""
service_gateway = []
service_gateway_raw = client.list_service_gateways(compartment_id=compartment_id,
vcn_id=vcn_id)
service_gateway.extend(service_gateway_raw.data)
while service_gateway_raw.has_next_page:
service_gateway_raw = client.list_service_gateways(
compartment_id=compartment_id,
vcn_id=vcn_id,
page=service_gateway_raw.next_page)
service_gateway.extend(service_gateway_raw.data)
return service_gateway | d1635dac2d6e8c64617eb066d114785674e9e8b3 | 4,303 |
import pandas as pd
import numpy as np
import scipy as sp
import scipy.sparse
import scipy.sparse.csgraph
def get_road_network_data(city='Mumbai'):
    """
    Load the road-network edge list for *city* and return node coordinates X and a
    sparse, undirected adjacency matrix of its largest connected component.
    """
data = pd.read_csv("./RoadNetwork/"+city+"/"+city+"_Edgelist.csv")
size = data.shape[0]
X = np.array(data[['XCoord','YCoord']])
u, v = np.array(data['START_NODE'], dtype=np.int32), np.array(data['END_NODE'], dtype=np.int32)
w = np.array(data['LENGTH'], dtype=np.float64)
w = w/np.max(w) + 1e-6
G = sp.sparse.csr_matrix((w, (u,v)), shape = (size, size))
n, labels = sp.sparse.csgraph.connected_components(G)
    if n == 1:
        return X, G  # return coordinates too, for consistency with the multi-component case below
# If there are more than one connected component, return the largest connected component
count_size_comp = np.bincount(labels)
z = np.argmax(count_size_comp)
indSelect = np.where(labels==z)
Gtmp = G[indSelect].transpose()[indSelect]
Gtmp = make_undirected(Gtmp)
return X[indSelect], Gtmp | 1d014e50b2d2883b5fda4aba9c2de5ca5e9dae2a | 4,304 |
from apps.jsonapp import JSONApp
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
-------------
This function launches the app using configuration written in
two json files: config.json and input_metadata.json.
"""
# 1. Instantiate and launch the App
logger.info("1. Instantiate and launch the App")
app = JSONApp()
result = app.launch(process_fastqc,
config,
in_metadata,
out_metadata)
# 2. The App has finished
logger.info("2. Execution finished; see " + out_metadata)
return result | 302ba667a775dd09a6779235a774a4a95f26af32 | 4,305 |
def _infection_active(state_old, state_new):
"""
Parameters
----------
state_old : dict or pd.Series
        Dictionary or pd.Series with the keys "s", "e", "i", and "r".
state_new : dict or pd.Series
Same type requirements as for the `state_old` argument in this function
apply.
Returns
-------
infection_active : bool
True if the event that occurred between `state_old` and `state_new` was
a transition from E to I. False otherwise.
"""
return state_new["s"] == state_old["s"] and \
state_new["e"] == state_old["e"] - 1 and \
state_new["i"] == state_old["i"] + 1 and \
state_new["r"] == state_old["r"] | a81376ee1853d34b1bc23b29b2a3e0f9d8741472 | 4,306 |
def isnum(txt):
"""Return True if @param txt, is float"""
try:
float(txt)
return True
except TypeError:
return False
except ValueError:
return False | c4f3aa3769810439d02312d0b616bd18c45a3da7 | 4,307 |
def xhr(func):
"""A decorator to check for CSRF on POST/PUT/DELETE using a <form>
element and JS to execute automatically (see #40 for a proof-of-concept).
When an attacker uses a <form> to downvote a comment, the browser *should*
add a `Content-Type: ...` header with three possible values:
* application/x-www-form-urlencoded
* multipart/form-data
* text/plain
If the header is not sent or requests `application/json`, the request is
not forged (XHR is restricted by CORS separately).
"""
def dec(self, env, req, *args, **kwargs):
if req.content_type and not req.content_type.startswith("application/json"):
raise Forbidden("CSRF")
return func(self, env, req, *args, **kwargs)
return dec | 9a721d5eaa90be92cc05aa565b7ceb57fd209c7d | 4,308 |
def updateStyle(style, **kwargs):
"""Update a copy of a dict or the dict for the given style"""
if not isinstance(style, dict):
style = getStyle(style) # look up the style by name
style = style.copy()
style.update(**kwargs)
return style | 57db589ff5b1b7d3e6eb3874826f77f5572c2aa7 | 4,309 |
def lift2(f, a, b):
"""Apply f => (a -> b -> c) -> f a -> f b -> f c"""
return a.map(f).apply_to(b) | 6b54cacf23ac4acb9bf9620cd259aa6f9630bbc3 | 4,310 |
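A hedged usage sketch for lift2() above; the Box class is hypothetical and only illustrates the .map/.apply_to protocol that lift2 relies on (f must be curried):
class Box:
    def __init__(self, value):
        self.value = value
    def map(self, f):
        return Box(f(self.value))
    def apply_to(self, other):
        # self.value is a partially-applied function at this point
        return Box(self.value(other.value))
add = lambda x: lambda y: x + y           # curried addition
print(lift2(add, Box(2), Box(3)).value)   # 5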
def project_version(file_path=settings.PROJECT_VERSION_FILE):
"""Project version."""
try:
with open(file_path) as file_obj:
version = file_obj.read()
return parse_version(version)
except Exception:
pass
return None | 4dd4bc6d9ae4570fa1278a709d45d12e8a7a2f8e | 4,311 |
import numpy
def readBinary(fileName):
"""Read a binary FIXSRC file."""
with FIXSRC(fileName, "rb", numpy.zeros((0, 0, 0, 0))) as fs:
fs.readWrite()
return fs.fixSrc | 053c21a5cbc5b9c5b31eed94cfbea171d1a37d5e | 4,312 |
def short_whitelist(whitelist):
"""A condensed version of the whitelist."""
for x in ["guid-4", "guid-5"]:
whitelist.remove(x)
return whitelist | e0de5f4f8c86df301af03c9362b095f330bffc14 | 4,313 |
def extract_paths(actions):
"""
<Purpose>
Given a list of actions, it extracts all the absolute and relative paths
from all the actions.
<Arguments>
actions: A list of actions from a parsed trace
<Returns>
absolute_paths: a list with all absolute paths extracted from the actions
relative_paths: a list with all relative paths extracted from the actions
"""
absolute_paths = []
relative_paths = []
actions_with_path = ['open', 'creat', 'statfs', 'access', 'stat',
'link', 'unlink', 'chdir', 'rmdir', 'mkdir']
for action in actions:
# get the name of the syscall and remove the "_syscall" part at the end.
action_name = action[0][:action[0].find("_syscall")]
# we only care about actions containing paths
if action_name not in actions_with_path:
continue
# we only care about paths that exist
action_result = action[2]
if action_result == (-1, 'ENOENT'):
continue
path = action[1][0]
if path.startswith("/"):
if path not in absolute_paths:
absolute_paths.append(path)
else:
if path not in relative_paths:
relative_paths.append(path)
# get the second path of link
if action_name == "link":
path = action[1][1]
if path.startswith("/"):
if path not in absolute_paths:
absolute_paths.append(path)
else:
if path not in relative_paths:
relative_paths.append(path)
return absolute_paths, relative_paths | 94aaf8fef1a7c6d3efd8b04c980c9e87ee7ab4ff | 4,314 |
def str2int(s):
"""converts a string to an integer with the same bit pattern (little endian!!!)"""
r = 0
for c in s:
r <<= 8
r += ord(c)
return r | 0dd190b8711e29e12be8cc85a641d9a68251205b | 4,315 |
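A worked example for str2int() above (single-byte characters assumed):
assert str2int("abc") == 0x616263  # 'a' ends up in the most significant byte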
def load_operations_from_docstring(docstring):
"""Return a dictionary of OpenAPI operations parsed from a
a docstring.
"""
doc_data = load_yaml_from_docstring(docstring)
return {
key: val for key, val in iteritems(doc_data)
if key in PATH_KEYS or key.startswith('x-')
} | 6199a7e8b0c1cdb67f043e656d0797906fbf8bae | 4,316 |
import torch
def inference_model(model, img):
"""Inference image(s) with the classifier.
Args:
model (nn.Module): The loaded segmentor.
img (str/ndarray): The image filename or loaded image.
Returns:
result (list of dict): The segmentation results that contains: ...
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
if isinstance(img, str):
if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
data = dict(img_info=dict(filename=img), img_prefix=None)
else:
if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
cfg.data.test.pipeline.pop(0)
data = dict(img=img)
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
scores = model(return_loss=False, **data)
model_out = scores[0]
m_shape = model_out.shape
mask = np.zeros([m_shape[0], m_shape[1], 3], dtype=np.uint8)
for i in range(3):
mask[:, :, i] = model_out
ann = sly.Annotation.from_img_path(img)
for idx, class_name in enumerate(model.CLASSES, 1): # curr_col2cls.items():
mask_bools = np.all(mask == idx, axis=2) # exact match (3-channel img & rgb color)
if mask_bools.sum() == 0:
# raise
continue
bitmap = sly.Bitmap(data=mask_bools)
obj_class = g.meta.get_obj_class(class_name)
# obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap)
ann = ann.add_label(sly.Label(bitmap, obj_class))
# clear used pixels in mask to check missing colors, see below
return ann.to_json() | 814ab13018245e51e5c32e43f2c6c67020d4e7dd | 4,317 |
def run_egad(go, nw, **kwargs):
"""EGAD running function
Wrapper to lower level functions for EGAD
EGAD measures modularity of gene lists in co-expression networks.
This was translated from the MATLAB version, which does tiled Cross Validation
The useful kwargs are:
int - nFold : Number of CV folds to do, default is 3,
int - {min,max}_count : limits for number of terms in each gene list, these are exclusive values
Arguments:
go {pd.DataFrame} -- dataframe of genes x terms of values [0,1], where 1 is included in gene lists
nw {pd.DataFrame} -- dataframe of co-expression network, genes x genes
**kwargs
Returns:
pd.DataFrame -- dataframe of terms x metrics where the metrics are
['AUC', 'AVG_NODE_DEGREE', 'DEGREE_NULL_AUC', 'P_Value']
"""
assert nw.shape[0] == nw.shape[1] , 'Network is not square'
assert np.all(nw.index == nw.columns) , 'Network index and columns are not in the same order'
nw_mask = nw.isna().sum(axis=1) != nw.shape[1]
nw = nw.loc[nw_mask, nw_mask].astype(float)
np.fill_diagonal(nw.values, 1)
return _runNV(go, nw, **kwargs) | 816a4c71830b0c576d045c3e413327b8927a7a5e | 4,318 |
def prepare_new():
"""
Handles a request to add a prepare project configuration. This is the first step in a two-step
process. This endpoint generates a form to request information about the new project from the
user. Once the form is submitted a request is sent to begin editing the new config.
"""
issues = {}
issue_list = github_call(
"GET",
f'repos/{app.config["GITHUB_ORG"]}/{editor_types["registry"]["repo"]}/issues',
params={"state": "open", "labels": "new ontology"},
)
for issue in issue_list:
number = issue["number"]
title = issue["title"]
logger.debug(f"Got issue: {number}, {title}")
issues[number] = title
return render_template(
"prepare_new_config.jinja2", login=g.user.github_login, issueList=issues
) | 5e103264f9bb2543a577fdb013c0fe641bd7ad14 | 4,319 |
def generic_plot(xy_curves, title, save_path, x_label=None, y_label=None, formatter=None, use_legend=True, use_grid=True, close=True, grid_spacing=20, yaxis_sci=False):
"""
:param xy_curves:
:param title:
:param x_label:
:param y_label:
:param formatter:
:param save_path:
:param use_legend:
:param use_grid:
:return:
"""
fig, ax = plt.subplots()
plt.title(title)
plt.grid(use_grid)
for curve in xy_curves:
if curve.color is not None:
ax.plot(curve.x, curve.y, curve.style, label=curve.label, color=curve.color)
else:
ax.plot(curve.x, curve.y, curve.style, label=curve.label)
    if formatter is not None:
        ax.xaxis.set_major_formatter(plt.FuncFormatter(formatter))
ax.xaxis.set_major_locator(MultipleLocator(grid_spacing))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2e'))
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
#ax.margins(0.05)
if use_legend:
ax.legend()
"""if yaxis_sci:
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0), useOffset=None)"""
if save_path is not None:
plt.savefig(save_path, bbox_inches='tight', transparent=True)
if close:
plt.close('all')
return fig | f598578bfb6d63f9e9575223ecc25cdc9ace8082 | 4,320 |
from pathlib import Path
import io
def dump(module, path, **kwargs):
"""Serialize *module* as PVL text to the provided *path*.
:param module: a ``PVLModule`` or ``dict``-like object to serialize.
:param path: an :class:`os.PathLike`
:param ``**kwargs``: the keyword arguments to pass to :func:`dumps()`.
If *path* is an :class:`os.PathLike`, it will attempt to be opened
and the serialized module will be written into that file via
the :func:`pathlib.Path.write_text()` function, and will return
what that function returns.
If *path* is not an :class:`os.PathLike`, it will be assumed to be an
already-opened file object, and ``.write()`` will be applied
on that object to write the serialized module, and will return
what that function returns.
"""
try:
p = Path(path)
return p.write_text(dumps(module, **kwargs))
except TypeError:
# Not an os.PathLike, maybe it is an already-opened file object
try:
if isinstance(path, io.TextIOBase):
return path.write(dumps(module, **kwargs))
else:
return path.write(dumps(module, **kwargs).encode())
except AttributeError:
# Not a path, not an already-opened file.
raise TypeError(
"Expected an os.PathLike or an already-opened "
"file object for writing, but got neither."
) | 3c0d145883c4787ba0a69a3006ed5680dfba952b | 4,321 |
def get_user_labels(client: Client, *_):
""" Returns all user Labels
Args:
client: Client
"""
labels = client.get_user_labels_request()
contents = []
for label in labels:
contents.append({
'Label': label
})
context = {
'Exabeam.UserLabel(val.Label && val.Label === obj.Label)': contents
}
human_readable = tableToMarkdown('Exabeam User Labels', contents)
return human_readable, context, labels | a0ed0f71d2f39ef32a6fcdfd7586017e67957a6d | 4,322 |
def hdf5_sample(sample, request):
"""Fixture which provides the filename of a HDF5 tight-binding model."""
return sample(request.param) | 7d3320f5e4ce84bfa2cf0dbc1b5f321b2e3f6df8 | 4,323 |
def get_graph_embedding_features(fn='taxi_all.txt'):
"""
Get graph embedding vector, which is generated from LINE
"""
ge = []
with open(fn, 'r') as fin:
fin.readline()
for line in fin:
ls = line.strip().split(" ")
ge.append([float(i) for i in ls])
ge = np.array(ge)
ge = ge[np.argsort(ge[:,0])]
return ge[:,1:] | 5710714ad3dea46ee64cf9fcacdfbdfec37c8c1c | 4,324 |
def sub_0_tron(D, Obj, W0, eta=1e0, C=1.0, rtol=5e-2, atol=1e-4,
verbose=False):
"""Solve the Sub_0 problem with tron+cg is in lelm-imf."""
W, f_call = W0.copy(), (f_valp, f_grad, f_hess)
tron(f_call, W.reshape(-1), n_iterations=5, rtol=rtol, atol=atol,
args=(Obj, D, eta, C), verbose=verbose)
return W | 54b4ae6f884da13fe538da6c7d8cb38fc05e2e46 | 4,325 |
def find_flavor_name(nova_connection: NovaConnection, flavor_id: str):
"""
Find all flavor name from nova_connection with the id flavor_id
:param nova_connection: NovaConnection
:param flavor_id: str flavor id to find
:return: list of flavors name
"""
flavor_list = []
for flavor in nova_connection.connection.flavors.list():
flavor_info = dict(flavor.to_dict())
if 'id' in flavor_info and 'name' in flavor_info and flavor_info['id'] == flavor_id:
flavor_list.append(flavor_info['name'])
return flavor_list | c7252ad2b1f2f0676c4b2fed0304f6ddfe64f97f | 4,326 |
def entity_decode(txt):
"""decode simple entities"""
# TODO: find out what ones twitter considers defined,
# or if sgmllib.entitydefs is enough...
    return txt.replace("&gt;", ">").replace("&lt;", "<").replace("&amp;", "&") | 975dbd1b51773989455a16751f860af5b19f8fb5 | 4,327 |
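A small usage example for entity_decode() above:
assert entity_decode("a &lt; b &amp;&amp; b &gt; c") == "a < b && b > c"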
def __format_focal_length_tuple(_tuple):
"""format FocalLenght tuple to short printable string
we ignore the position after the decimal point
because it is usually not very essential for focal length
"""
if (isinstance(_tuple,tuple)):
numerator = _tuple[0]
divisor = _tuple[1]
else:
numerator=_tuple.numerator
divisor=_tuple.denominator
if numerator == 0:
return get_zero_value_ersatz()
if numerator % 10 == 0 and divisor % 10 == 0:
# example: change 110/10 -> 11
numerator = numerator // 10
divisor = divisor // 10
if divisor == 1:
# example: change 8/1 to 8mm
_string = f"{numerator}mm"
else:
# example: 524/10 -> 52mm
# we ignore the position after the decimal point
# because it is usually not very essential for focal length
_string = f"{numerator//divisor}mm"
return _string | c27feee47f07558a822acaf57cd6a8a8a1a61c3f | 4,328 |
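Worked examples for __format_focal_length_tuple() above (EXIF-style rational values):
assert __format_focal_length_tuple((8, 1)) == "8mm"
assert __format_focal_length_tuple((110, 10)) == "11mm"    # 110/10 -> 11
assert __format_focal_length_tuple((524, 10)) == "52mm"    # fraction truncated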
import json
def submit_resume_file(request):
""" Submit resume """
resume_file = request.FILES['json_file']
# print('resume file=%s' % resume_file)
file_content = resume_file.read()
data = json.loads(file_content.decode('utf-8'))
response = create_resume(data, request.user)
return response | 59f359d3c9c915f8ec228d3fb6ef17cc15ebac77 | 4,329 |
def inner_E_vals(vec):
"""
    Returns a list of the terms in the expectation (deviations from the mean)
    without dividing by the length or by length minus one.\nThis is meant to be used in
conjunction with an inner-product of two inner_E_vals() lists to
compute variance or covariance.
"""
out = [None] * len(vec)
dm = data_mean(vec)
for i, item in enumerate(vec):
out[i] = item - dm
return(out) | a470674f899341c2ded1bdc682586274daa76ff0 | 4,330 |
def count_infected(pop):
"""
counts number of infected
"""
return sum(p.is_infected() for p in pop) | 03b3b96994cf4156dcbe352b9dafdd027de82d41 | 4,331 |
def esum(expr_iterable):
"""
Expression sum
:param term_iterable:
:return:
"""
var_dict = {}
constant = 0
for expr in expr_iterable:
for (var_name, coef) in expr.var_dict.items():
            if var_name not in var_dict:
var_dict[var_name] = coef
else:
var_dict[var_name] += coef
constant += expr.constant
return Expression.from_var_dict(
var_dict,
constant
) | e63d574c4442843febf66e0326780d6ffb3ba647 | 4,332 |
def _prompt(func, prompt):
"""Prompts user for data. This is for testing."""
return func(prompt) | c5e8964e5b3a3d0a222e167341a44d8953d1e0c1 | 4,333 |
def show_trace(func, *args, **kwargs):
# noinspection PyShadowingNames
"""
Display epic argument and context call information of given function.
>>> @show_trace
>>> def complex_function(a, b, c, **kwargs):
...
>>> complex_function('alpha', 'beta', False, debug=True)
calling haystack.submodule.complex_function with
args: ({'a': 'alpha', 'b': 'beta', 'c': False},)
kwargs: {'debug': True}
>>>
:param func: the decorated function
:param args: the positional args of the function
:param kwargs: the keyword args of the function
:return: the function return value
"""
func = Func(func)
print(
f"Calling {func.full_name} with: \n "
f"args: {args} \n "
f"kwargs: {kwargs}"
)
return func(*args, **kwargs) | 1111b5a61a357de8a094647b3a659e5097822ecb | 4,334 |
from pathlib import Path
from typing import List
import os
import subprocess
from rich.console import Console
def record_editor(project_path: Path, debug: bool = False) -> List[Path]:
"""
Records ezvi running for each instructions file in a project.
The files to record a found using `fetch_project_editor_instructions()`.
Args:
project_path (Path): The path towards the project from which the
`ezvi` instructions will be recorded.
debug (bool, optional): Whether or not to use this function in
debug mode. Debug mode shows `ezvi`'s output on the user's terminal.
Defaults to False.
Returns:
List[Path]: A list of paths towards each recording that has been created.
"""
all_editor_instructions: List[Path] = fetch_project_editor_instructions(project_path)
all_editor_recordings: List[Path] = []
console: Console = Console()
with console.status("[bold green]Recording editor...") as status:
for instruction in all_editor_instructions:
save_path: Path = (
instruction.parent.parent / Path("asciicasts") / instruction.name
).with_suffix(".cast")
if save_path.exists():
os.remove(save_path)
subprocess.run(
["asciinema", "rec", "-c", f"runner {instruction}", str(save_path)],
capture_output=not debug,
)
console.log(f"Video contents in file {instruction} have been recorded.")
all_editor_recordings.append(save_path)
return all_editor_recordings | c603816c86bb18c7a18e1793f5068c2e23bf7cf0 | 4,335 |
def to_percent__xy(x, y):
"""
To percent with 2 decimal places by diving inputs.
:param x:
:param y:
:return:
"""
return '{:.2%}'.format(x / y) | 3d5cfcde6f1dbd65b99a4081790e03efb669ee02 | 4,336 |
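A one-line example for to_percent__xy() above:
assert to_percent__xy(1, 4) == '25.00%'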
def generic_constructor(value, name=None, strict=False, allow_downcast=None):
"""SharedVariable Constructor"""
return SharedVariable(type=generic, value=value, name=name, strict=strict,
allow_downcast=allow_downcast) | e4d168449099154ce936d49c18dfc1754a774115 | 4,337 |
import torch
def u_scheme(tree, neighbours):
"""Calculates the u-:ref:`scheme <presolve>`.
"""
unique_neighbours = torch.sort(neighbours, 1, descending=True).values
unique_neighbours[:, 1:][unique_neighbours[:, 1:] == unique_neighbours[:, :-1]] = -1
pairs = torch.stack([tree.id[:, None].expand_as(neighbours), unique_neighbours], -1)
pairs = pairs[(pairs >= 0).all(-1) & tree.terminal[pairs].all(-1)]
partner_is_larger = tree.depths[pairs[:, 0]] > tree.depths[pairs[:, 1]]
smaller_partners = torch.flip(pairs[partner_is_larger], (1,))
pairs = torch.cat([pairs, smaller_partners])
return ragged.from_pairs(pairs, len(tree.id), len(tree.id)) | 4b0727f6bbaa8121435b347100751928e6f0a348 | 4,338 |
def find_and_open_file(f):
"""
Looks in open windows for `f` and focuses the related view.
Opens file if not found. Returns associated view in both cases.
"""
for w in sublime.windows():
for v in w.views():
if normpath(f) == v.file_name():
w.focus_view(v)
return v
return sublime.active_window().open_file(f) | f300b76c9c4f4cf50e34490996d0f57feeb01728 | 4,339 |
def stochastic_fit(input_data: object) -> FitParams:
"""
Acquire parameters for the stochastic input signals.
"""
params = FitParams(0.000036906289210966747, 0.014081285145600045)
return params | b138b1b434c9a4270c6915d67d6fdca3434a59a5 | 4,340 |
from typing import Dict
from typing import Tuple
from typing import List
def sort_features_by_normalization(
normalization_parameters: Dict[int, NormalizationParameters]
) -> Tuple[List[int], List[int]]:
"""
Helper function to return a sorted list from a normalization map.
Also returns the starting index for each feature type"""
# Sort features by feature type
sorted_features: List[int] = []
feature_starts: List[int] = []
assert isinstance(
list(normalization_parameters.keys())[0], str
), "Normalization Parameters need to be str"
for feature_type in identify_types.FEATURE_TYPES:
feature_starts.append(len(sorted_features))
for feature in sorted(normalization_parameters.keys()):
norm = normalization_parameters[feature]
if norm.feature_type == feature_type:
sorted_features.append(feature)
return sorted_features, feature_starts | 7beca199dc71e43fcf9f5c8870ee3f450a116e86 | 4,341 |
def block_sort():
"""
Do from here: https://en.wikipedia.org/wiki/Block_sort
:return: None
"""
return None | 69143373200b0dfc560404c12d1500988869a688 | 4,342 |
def prep_ground_truth(paths, box_data, qgt):
"""adds dbidx column to box data, sets dbidx in qgt and sorts qgt by dbidx
"""
orig_box_data = box_data
orig_qgt = qgt
path2idx = dict(zip(paths, range(len(paths))))
mapfun = lambda x : path2idx.get(x,-1)
box_data = box_data.assign(dbidx=box_data.file_path.map(mapfun).astype('int'))
box_data = box_data[box_data.dbidx >= 0].reset_index(drop=True)
new_ids = qgt.index.map(mapfun)
qgt = qgt[new_ids >= 0]
qgt = qgt.set_index(new_ids[new_ids >= 0])
qgt = qgt.sort_index()
## Add entries for files with no labels...
qgt = qgt.reindex(np.arange(len(paths))) # na values will be ignored...
assert len(paths) == qgt.shape[0], 'every path should be in the ground truth'
return box_data, qgt | 6c01fb121933d5fdf235948136ffc73e08e7d6ee | 4,343 |
def top_1_pct_share(df, col, w=None):
"""Calculates top 1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w) | 9fca3e9fdd1c69bda3a96111ca110791adb729be | 4,344 |
def containsdupevalues(structure) -> bool or None:
"""Returns True if the passed dict has duplicate items/values, False otherwise. If the passed structure is not a dict, returns None."""
if isinstance(structure, dict):
# fast check for dupe keys
rev_dict = {}
for key, value in structure.items():
rev_dict.setdefault(value, set()).add(key)
dupes = list(filter(lambda x: len(x) > 1, rev_dict.values()))
if dupes:
return True
else:
return False
return None | 4d3c72e71740e69a13889cef816fc4c00ead5790 | 4,345 |
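Small examples for containsdupevalues() above:
assert containsdupevalues({"a": 1, "b": 1, "c": 2}) is True    # value 1 appears twice
assert containsdupevalues({"a": 1, "b": 2}) is False
assert containsdupevalues([1, 1]) is None                      # not a dict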
import re
def only_letters(answer):
"""Checks if the string contains alpha-numeric characters
Args:
answer (string):
Returns:
bool:
"""
match = re.match("^[a-z0-9]*$", answer)
return bool(match) | 32c8905294f6794f09bb7ea81ed7dd4b6bab6dc5 | 4,346 |
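Examples for only_letters() above:
assert only_letters("abc123") is True
assert only_letters("Abc") is False   # uppercase characters are rejected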
def is_finally_visible_func(*args):
"""
is_finally_visible_func(pfn) -> bool
    Is the function visible (even after considering 'SCF_SHHID_FUNC')?
@param pfn (C++: func_t *)
"""
return _ida_funcs.is_finally_visible_func(*args) | 468687af0bafb42887f8e43453a4e6c641abde5e | 4,347 |
def _losetup_list():
"""
List all the loopback devices on the system.
:returns: A ``list`` of
2-tuple(FilePath(device_file), FilePath(backing_file))
"""
output = check_output(
["losetup", "--all"]
).decode('utf8')
return _losetup_list_parse(output) | 00ad4bdb76e22f44da50b35a296d43c5678698ce | 4,348 |
import numpy as np
def gaussian_product_center(a,A,b,B):
    """
    Center of the product of two Gaussians with exponents a, b and centers A, B
    (Gaussian product theorem): P = (a*A + b*B) / (a + b).
    """
A = np.array(A)
B = np.array(B)
return (a*A+b*B)/(a+b) | a52828d72f99bef8f666d1dbd33ee8d748e0b543 | 4,349 |
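A quick sanity check for gaussian_product_center() above; with equal exponents the product Gaussian is centred at the midpoint:
import numpy as np
P = gaussian_product_center(1.0, [0.0, 0.0, 0.0], 1.0, [0.0, 0.0, 2.0])
assert np.allclose(P, [0.0, 0.0, 1.0])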
from yaml import YAMLError, safe_load
def read_yaml_file(yaml_path):
"""Loads a YAML file.
:param yaml_path: the path to the yaml file.
:return: YAML file parsed content.
"""
if is_file(yaml_path):
try:
file_content = sudo_read(yaml_path)
            return safe_load(file_content)
except YAMLError as e:
raise YAMLError('Failed to load yaml file {0}, due to {1}'
''.format(yaml_path, str(e)))
return None | 7739f55b4b872392ddad4d5184fa718d8c1daa5e | 4,350 |
def _InUse(resource):
"""All the secret names (local names & remote aliases) in use.
Args:
resource: Revision
Returns:
List of local names and remote aliases.
"""
return ([
source.secretName
for source in resource.template.volumes.secrets.values()
] + [
source.secretKeyRef.name
for source in resource.template.env_vars.secrets.values()
]) | cf13ccf1d0fffcd64ac8b3a40ac19fdb2b1d12c5 | 4,351 |
def filter_dwnmut(gene_data):
"""Removes the variants upstream to Frameshift/StopGain mutation.
Args:
- gene_data(dictionary): gene_transcript wise variants where
there is at least one Frameshift/Stopgain
mutation.
Returns:
- flt_data(dictionary): gene_transcript wise variants where there
is at least one Frameshift/StopGain mutation
and at least one downstream coding exonic
variant.
"""
rfgene = Refgene()
flt_gene_data = {}
for gene_info, val in gene_data.items():
trans_id = gene_info[1]
strand = rfgene.get_strand(trans_id)
if not strand:
continue
for e in val:
t = {}
            variants = list(e.keys())
if strand == '+':
variants.sort()
elif strand == '-':
variants.sort(reverse=True)
size = 0
mut_type = ''
flag = False
for var in variants:
if flag == False and e[var][0] == 'StopGain':
mut_type = 'StopGain'
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == False and e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == True:
if mut_type == 'StopGain':
t[var] = e[var]
elif e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
elif e[var][0].startswith('NonFrameShift'):
if e[var][0][13:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][13:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
else:
t[var] = e[var]
if len(t) > 1:
key = tuple(list(gene_info) + [strand])
if key not in flt_gene_data:
flt_gene_data[key] = [t]
else:
if t != flt_gene_data[key][0]:
flt_gene_data[key].append(t)
return flt_gene_data | 9573e5cbd0ed3f96f8e7f47fa395476cc7bd513b | 4,352 |
def format_scwgbs_file(file_path):
"""
Format a scwgbs file to a more usable manner
:param file_path: The path of the file to format
:type file_path: str
:return: A dict where each key is a chr and the value is an array with all the scwgbs reads
:rtype: dict
"""
chr_dict = extract_cols(file_path)
chr_dict = combine_strands(chr_dict)
return chr_dict | eaf2925e3f634138ba8eb7bf1f61189d30f86d7c | 4,353 |
import torch
from torch.nn.functional import mse_loss
def ls_generator_loss(scores_fake):
"""
Computes the Least-Squares GAN loss for the generator.
Inputs:
- scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
Outputs:
- loss: A PyTorch Tensor containing the loss.
"""
loss = None
####################################
# YOUR CODE HERE #
####################################
labels = torch.ones_like(scores_fake)
loss = 1/2 * mse_loss(scores_fake, labels, reduction = 'mean')
########## END ##########
return loss | 6b6d1b94e13de514e56fe83764869b8c2948a40a | 4,354 |
def rmsd(predicted, reference):
"""
Calculate root-mean-square deviation (RMSD) between two variables.
Calculates the root-mean-square deviation between two variables
PREDICTED and REFERENCE. The RMSD is calculated using the
formula:
RMSD^2 = sum_(n=1)^N [(p_n - r_n)^2]/N
where p is the predicted values, r is the reference values, and
N is the total number of values in p & r. Note that p & r must
have the same number of values.
Input:
PREDICTED : predicted values
REFERENCE : reference values
Output:
R : root-mean-square deviation (RMSD)
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
Created on Dec 9, 2016
"""
# Check that dimensions of predicted and reference fields match
utils.check_arrays(predicted, reference)
# Calculate the RMSE
r = np.sqrt(np.sum(np.square(predicted - reference)) / len(predicted))
return r | 5903c0a900b6f66bddd640f9c65146a08e0b768d | 4,355 |
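A small usage sketch for rmsd() above (assumes the surrounding utils helper providing check_arrays is importable):
import numpy as np
p = np.array([1.0, 2.0, 3.0])
r = np.array([1.0, 2.0, 5.0])
print(rmsd(p, r))  # sqrt(4/3) ≈ 1.155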
from typing import List
import numpy as np
from scipy.stats import rankdata
def _get_ranks_for_sequence(logits: np.ndarray,
labels: np.ndarray) -> List[float]:
"""Returns ranks for a sequence.
Args:
logits: Logits of a single sequence, dim = (num_tokens, vocab_size).
labels: Target labels of a single sequence, dim = (num_tokens, 1).
Returns:
An array of ranks for tokens in the sequence, dim = (num_tokens, 1).
"""
sequence_ranks = []
for logit, label in zip(logits, labels.astype(int)):
rank = rankdata(-logit, method='min')[label] - 1.0
sequence_ranks.append(rank)
return sequence_ranks | 59cb646f5f4f498f3bfe1c3c01672ce61e124428 | 4,356 |
import torch
import time
import numpy as np
def eval_model_on_grid(model, bbox, tx, voxel_grid_size, cell_vox_min=None, cell_vox_max=None, print_message=True):
"""
Evaluate the trained model (output of fit_model_to_pointcloud) on a voxel grid.
:param model: The trained model returned from fit_model_to_pointcloud
:param bbox: The bounding box defining the region of space on which to evaluate the model
(represented as the pair (origin, size))
:param tx: An affine transformation which transforms points in world coordinates to model
coordinates before evaluating the model (the second return value of fit_model_to_grid).
The transformation is represented as a tuple (t, s) where t is a translation and s is scale.
:param voxel_grid_size: The size of the voxel grid on which to reconstruct
:param cell_vox_min: If not None, reconstruct on the subset of the voxel grid starting at these indices.
:param cell_vox_max: If not None, reconstruct on the subset of the voxel grid ending at these indices.
:param print_message: If true, print status messages to stdout.
:return: A tensor representing the model evaluated on a grid.
"""
bbox_origin, bbox_size = bbox
voxel_size = bbox_size / voxel_grid_size # size of a single voxel cell
if cell_vox_min is None:
cell_vox_min = torch.tensor([0, 0, 0], dtype=torch.int32)
if cell_vox_max is None:
cell_vox_max = voxel_grid_size
if print_message:
print(f"Evaluating model on grid of size {[_.item() for _ in (cell_vox_max - cell_vox_min)]}.")
eval_start_time = time.time()
xmin = bbox_origin + (cell_vox_min + 0.5) * voxel_size
xmax = bbox_origin + (cell_vox_max - 0.5) * voxel_size
xmin = affine_transform_pointcloud(xmin.unsqueeze(0), tx).squeeze()
xmax = affine_transform_pointcloud(xmax.unsqueeze(0), tx).squeeze()
xmin, xmax = xmin.numpy(), xmax.numpy()
cell_vox_size = (cell_vox_max - cell_vox_min).numpy()
xgrid = np.stack([_.ravel() for _ in np.mgrid[xmin[0]:xmax[0]:cell_vox_size[0] * 1j,
xmin[1]:xmax[1]:cell_vox_size[1] * 1j,
xmin[2]:xmax[2]:cell_vox_size[2] * 1j]], axis=-1)
xgrid = torch.from_numpy(xgrid).to(model.alpha_.dtype)
xgrid = torch.cat([xgrid, torch.ones(xgrid.shape[0], 1).to(xgrid)], dim=-1).to(model.alpha_.dtype)
    ygrid = model.predict(xgrid).reshape(tuple(cell_vox_size.astype(int))).detach().cpu()
if print_message:
print(f"Evaluated model in {time.time() - eval_start_time}s.")
return ygrid | 7486d27d11c250cccd1c9de5fc65b8f8f773f906 | 4,357 |
from typing import IO
from typing import Iterable
from typing import Mapping
import subprocess
import errno
import csv
import struct
def create_gop(mpeg_file_object: IO[bytes]) -> bytes:
"""Create an index that allows faster seeking.
Note: as far as I can tell, this is not a standard GOP / group of pictures
structure. It is an index that maps frame numbers to stream offsets.
This is referred to as `GOPList` in MoonShell:
misctools/DPGTools/sources/_encvideo.pas
and simply as `GOP` in other implementations."""
def row_to_frame(row: Iterable[str]) -> Mapping[str, str]:
frame = {}
for item in row:
if item == "frame":
continue
key, value = item.split("=", 1)
frame[key] = value
return frame
mpeg_file_object.seek(0)
cmd = [
FFPROBE,
"-hide_banner",
"-print_format", "compact",
"-show_frames", "-select_streams", "v",
"-",
]
gop = b""
frame_number = 0
try:
process = subprocess.Popen(
cmd,
stdin=mpeg_file_object,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError as os_err:
if os_err.errno == errno.ENOENT:
raise ExternalCommandNotFoundError(cmd)
raise os_err
for row in csv.reader(process.stdout, delimiter="|"):
if not row or row[0] != "frame":
continue
frame = row_to_frame(row)
if frame["pict_type"] == "I":
gop += struct.pack("<l", frame_number)
gop += struct.pack("<l", int(frame["pkt_pos"]))
frame_number += 1
process.wait()
if process.returncode != 0:
stderr = process.stderr.read()
raise ExternalCommandFailedError(process.returncode, process.args, stderr)
return gop | 3738ca12ab7c61d0a595455dcffe326c6171c9eb | 4,358 |
import torch
from tqdm import tqdm
def beam_search(model, test_data_src, beam_size, max_decoding_time_step):
""" Run beam search to construct hypotheses for a list of src-language sentences.
@param model : Model
@param test_data_src (List[List[str]]): List of sentences (words) in source language, from test set.
@param beam_size (int): beam_size (# of hypotheses to hold for a translation at every step)
@param max_decoding_time_step (int): maximum sentence length that Beam search can produce
@returns hypotheses (List[List[Hypothesis]]): List of Hypothesis translations for every source sentence.
"""
model.eval()
hypotheses = []
with torch.no_grad():
for src_sent in tqdm(test_data_src, desc='Decoding'):
example_hyps = model.beam_search(src_sent, beam_size=beam_size, max_decoding_time_step=max_decoding_time_step)
hypotheses.append(example_hyps)
return hypotheses | ff5a52a336defa4f647a6eb8d7d39c12aa13b9be | 4,359 |
def filter_months(c, months):
"""Filters the collection by matching its date-time index with the specified months."""
indices = find_all_in(get_months(c), get_months(months))
return take_at(c, indices) | c635c80fb007f49c2ef9238830374d0465b488b3 | 4,360 |
import itertools
import numpy as np
def kmode_fisher(ks,mus,param_list,dPgg,dPgv,dPvv,fPgg,fPgv,fPvv,Ngg,Nvv, \
verbose=False):
"""
Fisher matrix for fields g(k,mu) and v(k,mu).
Returns F[g+v] and F[g]
dPgg, dPgv, dPvv are dictionaries of derivatives.
fPgg, fPgv, fPvv are fiducial powers.
"""
# Populate Fisher matrix
num_params = len(param_list)
param_combs = itertools.combinations_with_replacement(param_list,2)
Fisher = np.zeros((num_params,num_params))
FisherG = np.zeros((num_params,num_params))
for param1,param2 in param_combs:
i = param_list.index(param1)
j = param_list.index(param2)
if verbose: print("Calculating Fisher for ",param1,param2)
integral = 0.
integralG = 0.
dCov1 = np.array([[dPgg[param1],dPgv[param1]],
[dPgv[param1],dPvv[param1]]])
dCov2 = np.array([[dPgg[param2],dPgv[param2]],
[dPgv[param2],dPvv[param2]]])
        Cov = np.array([[fPgg+Ngg,fPgv],
                        [fPgv,fPvv+Nvv]])
# Integrate over mu and k
for mu_id,mu in enumerate(mus[:-1]):
dmu = mus[mu_id+1]-mus[mu_id]
for k_id,k in enumerate(ks[:-1]):
dk = ks[k_id+1]-ks[k_id]
dC1 = dCov1[:,:,mu_id,k_id]
dC2 = dCov2[:,:,mu_id,k_id]
Cinv = np.linalg.inv(Cov[:,:,mu_id,k_id])
CinvG = 1./Cov[0,0,mu_id,k_id]
trace = np.trace(np.dot(np.dot(dC1,Cinv),np.dot(dC2,Cinv)))
traceG = dC1[0,0]*dC2[0,0]*CinvG**2.
pref = (k**2.)*dk*V/(2.*np.pi)**2./2.*dmu
integral += pref*trace
integralG += pref*traceG
Fisher[i,j] = integral
if j!=i: Fisher[j,i] = integral
FisherG[i,j] = integralG
if j!=i: FisherG[j,i] = integralG
return stats.FisherMatrix(Fisher,param_list), \
stats.FisherMatrix(FisherG,param_list) | e27699d51ce65b284d1289d5e6ae4472ae6fa63e | 4,361 |
def detect_device(model):
"""
Tries to determine the best-matching device for the given model
"""
model = model.lower()
# Try matching based on prefix, this is helpful to map e.g.
# FY2350H to FY2300
for device in wavedef.SUPPORTED_DEVICES:
if device[:4] == model[:4]:
return device
raise wavedef.UnsupportedDeviceError(
"Unable to autodetect device '%s'. "
"Use FYGen(device_name='fy2300') with one of the supported devices, "
"beware that the waveforms might not match up."
"Supported devices: %s"
% (
model,
', '.join(wavedef.SUPPORTED_DEVICES)
)
) | 6b58b5a8dc67a1f30e499c31b545b70ead908aaf | 4,362 |
import re
def re_identify_image_metadata(filename, image_names_pattern):
"""
Apply a regular expression to the *filename* and return metadata
:param filename:
:param image_names_pattern:
:return: a list with metadata derived from the image filename
"""
match = re.match(image_names_pattern, filename)
return None if match is None else match.groups() | 1730620682f2457537e3f59360d998b251f5067f | 4,363 |
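A hedged usage example for re_identify_image_metadata() above; the pattern below is hypothetical and only illustrates the groups being returned:
pattern = r"(?P<well>[A-Z]\d{2})_s(?P<site>\d+)\.tif"
print(re_identify_image_metadata("B03_s2.tif", pattern))  # ('B03', '2')
print(re_identify_image_metadata("notes.txt", pattern))   # None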
import yaml
import sys
def load_config(config_file: str) -> dict:
"""
Function to load yaml configuration file
:param config_file: name of config file in directory
"""
try:
with open(config_file) as file:
config = yaml.safe_load(file)
except IOError as e:
print(e)
sys.exit(1)
return config | f811aa4d6a16d8a5e56d14b91d616f9f8b3ad492 | 4,364 |
import numpy as np
def PSF_Moffat(alpha,beta,x,y):
""" Compute the PSF of the instrument with a Moffat function
Parameters
-----------
alpha: float
radial parameter
beta: float
power indice of the function
x: float
position along the x axis
y: float
position along the y axis
Returns:
---------
psf: array
psf of the instrument
"""
psf = (beta-1.)/(np.pi*alpha*alpha) * (1.+(x*x+y*y)/(alpha*alpha))**(-beta)
return psf | c70fe8582c27518ab521d3d663da87e1354f0668 | 4,365 |
def _tf_range_for_stmt(iter_,
extra_test,
body,
get_state,
set_state,
init_vars,
basic_symbol_names,
composite_symbol_names,
opts):
"""Overload of for_stmt that iterates over a TF range (and elides it)."""
_disallow_undefs_into_loop(*init_vars)
start, limit, delta = iter_.op.inputs
def while_body(iterate, *loop_vars):
new_vars = body(iterate, *loop_vars)
loop_vars = (iterate + delta,)
if new_vars:
loop_vars += new_vars
return loop_vars
def while_cond(iterate, *loop_vars):
"""Cond function for `tf.while_loop`."""
main_test = math_ops.logical_or(
math_ops.logical_and(delta >= 0, iterate < limit),
math_ops.logical_and(delta < 0, iterate > limit))
if extra_test is not None:
return control_flow_ops.cond(
main_test,
lambda: extra_test(*loop_vars),
lambda: False,
)
return main_test
opts['maximum_iterations'] = math_ops.cast(
misc.get_range_len(start, limit, delta), dtypes.int32)
results = _tf_while_stmt(
while_cond,
while_body,
get_state,
set_state,
(start,) + init_vars,
('<internal iterate>',) + basic_symbol_names,
composite_symbol_names,
opts,
)
# Note: the iteration index is not returned by the while loop, however
# if a symbol with the same name exists outside the loop, it will be captured
# by the loop variables and ultimately updated correctly.
if isinstance(results, (tuple, list)):
assert len(results) >= 1 # Has at least the iterate.
if len(results) > 1:
results = results[1:]
else:
results = ()
return results | 37e21af2fbf5bd910743c220e872c45e08131e97 | 4,366 |
import importlib
def _import_class(module_and_class_name: str) -> type:
"""Import class from a module, e.g. 'text_recognizer.models.MLP'"""
module_name, class_name = module_and_class_name.rsplit(".", 1) # splits into 2 elements at "."
module = importlib.import_module(module_name)
class_ = getattr(module, class_name) # gives us model.class_name attribute (ex: jacques = Person(), jacques.age -> 28)
return class_ | c5666b6393c89bf9cb32a7a3351d3a8706ffd631 | 4,367 |
def mark_task(func):
"""Mark function as a defacto task (for documenting purpose)"""
func._is_task = True
return func | 9f0156fff2a2a6dcb64e79420022b78d1c254490 | 4,368 |
from operator import getitem
import math
def dict_diff(left: Map, right: Map) -> t.List[t.Dict]:
"""Get the difference between 2 dict-like objects
Args:
left (Map): The left dict-like object
right (Map): The right dict-like object
The value returned is a list of dictionaries with keys ["path", "left", "right"]
which contain the query path and the differences between the left and right mapping.
If a key is missing in either mapping, it will be indicated as a "None".
`math.nan` (not-a-number) is used for default values in the comparison because of
the property: `math.nan != math.nan`. Simple None cannot be used, since it would
not handle keys that both have a value of None. In general, this function might
report false-positives for keys that contain the math.nan (or np.nan) value simply
due to this property. There is no workaround available.
"""
left_paths = set(get_valid_access_paths(left, _leaf_only=True, _use_lists=False))
right_paths = set(get_valid_access_paths(right, _leaf_only=True, _use_lists=False))
return list(
{
"path": path,
"left": getitem(left, path, math.nan),
"right": getitem(right, path, math.nan),
}
for path in left_paths.union(right_paths)
if getitem(left, path, math.nan) != getitem(right, path, math.nan)
) | 23f7aa611230879099590b696f9484aa9881a34b | 4,369 |
def make_slack_message_divider() -> dict:
"""Generates a simple divider for a Slack message.
Returns:
The generated divider.
"""
return {'type': 'divider'} | 9d0243c091065056a29d9fa05c62fadde5dcf6f6 | 4,370 |
import os
def username():
""" Return username from env. """
return os.environ["USER"] | adcb6da63823d78203d02b2a3910f92e3ae97724 | 4,371 |
def get_history(filename: str, extension: int = 0) -> str:
"""
    Returns the HISTORY header lines.
Args:
filename: image filename.
extension: image extension number.
Returns:
string containing all HISTORY lines.
"""
filename = azcam.utils.make_image_filename(filename)
hdr = pyfits.getheader(filename, extension)
history = hdr["HISTORY"]
return str(history) | 495b5d85a9313c081cf5bea837440dc51e9f8d6e | 4,372 |
def product_detail(request, product_id):
""" A view to show one product's details """
product = get_object_or_404(Product, pk=product_id)
review_form = ReviewForm()
reviews = Review.objects.filter(product_id=product_id).order_by('-created_at')
context = {
'product': product,
'review_form': review_form,
'reviews': reviews,
}
return render(request, 'products/product_detail.html', context) | 5e83dd11b2cfb4e43186c584424e96e35f52333a | 4,373 |
import json
def deserialize_response_content(response):
"""Convert utf-8 encoded string to a dict.
Since the response is encoded in utf-8, it gets decoded to regular python
string that will be a json string. That gets converted to python
dictionary.
Note: Do not use this method to process non-json response.content
:param requests.models.Response response: object that includes attributes
status code and content
:return: response content as decoded dictionary
:rtype: dict
"""
if response.content:
decoded = response.content.decode("utf-8")
if len(decoded) > 0:
return json.loads(decoded)
return {} | ff5494e38f7a6f5b49b4e84e1e8a2ee1633d3872 | 4,374 |
def _remove_header_create_bad_object(remove, client=None):
""" Create a new bucket, add an object without a header. This should cause a failure
"""
bucket_name = get_new_bucket()
if client == None:
client = get_client()
key_name = 'foo'
# remove custom headers before PutObject call
def remove_header(**kwargs):
if (remove in kwargs['params']['headers']):
del kwargs['params']['headers'][remove]
client.meta.events.register('before-call.s3.PutObject', remove_header)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
return e | bb1f6fb6ca61c7c3137ec4dbf35c2c3500c0e82d | 4,375 |
def SpawnObjectsTab():
"""This function creates a layout containing the object spawning functionality.
Returns:
str : The reference to the layout.
"""
### Create main Layout for the tab
mainTab = cmds.columnLayout(adjustableColumn=True, columnAttach=('both', 20))
cmds.separator(height=10, style="none")
cmds.text(label="Asset Gallery:", align="left")
### Asset Name Text Field
cmds.separator(height=10, style="none")
SpawnObjectsTab.UserField = cmds.textFieldButtonGrp(placeholderText="Write Asset's Name", buttonLabel="Save Asset", buttonCommand=lambda: saveAsset(),
ann="Assign a name for the asset that will be used in the outliner and in the directory hierarchy.")
### Asset Gallery Layout
cmds.separator(height=10, style="none")
cmds.scrollLayout(childResizable=True, height=305, width=455, backgroundColor=(.2,.2,.2))
global objectScroll
objectScroll = cmds.gridLayout(cellWidthHeight=(150,150), autoGrow=True)
populateGallery() # Creates Icons
cmds.setParent(mainTab) # Exit scroll layout
### Choose between Arnold StandIn and Assembly Reference
cmds.separator(height=10, style="none")
global loadMethodRadio
cmds.rowLayout(numberOfColumns=3, adjustableColumn=3)
loadMethodRadio = cmds.radioCollection()
cmds.radioButton("standin", label="Load as Arnold StandIn", select=True,
ann="Arnold standIns bring materials. Render in Arnold to see them.") # Radio button for StandIn
cmds.separator(width=20, style="none")
cmds.radioButton("assembly", label="Load as Assembly Reference",
ann="Assembly references can change their representation mode.") # Radio button for Assembly
cmds.setParent(mainTab)
### Choose how to set the location of the object
cmds.separator(height=10)
cmds.text(label="Spawning method:", align="left")
cmds.separator(height=5, style="none")
cmds.rowLayout(numberOfColumns=4, adjustableColumn=4, columnAttach4=("both","both","both","both"), columnOffset4=(10,10,10,10))
global placingRadio
placingRadio = cmds.radioCollection()
# Create only one copy
cmds.radioButton("single", label="Single Object", select=True,
onCommand=lambda x: cmds.columnLayout(randomControlLayout, edit=True, enable=False),
offCommand=lambda x: cmds.columnLayout(randomControlLayout, edit=True, enable=True),
ann="Create one single object. MMC and drag to scene does the same.")
# Create copies along a curve
cmds.radioButton("curve", label="Along Curve",
ann="Spawn assets along a previously created curve")
# Create copies between a range in world space
cmds.radioButton("range", label="In Range",
onCommand=lambda x: cmds.columnLayout(rangeLayout, edit=True, visible=True),
offCommand=lambda x: cmds.columnLayout(rangeLayout, edit=True, visible=False),
ann="Creates objects in a defined range of coordinates.")
# Create copies on a mesh's surface
cmds.radioButton("mesh", label="On Mesh",
ann="Locate assets on the surface of a selected mesh.")
cmds.setParent(mainTab)
### Randomization parameters
cmds.separator(height=10, style="none")
randomControlLayout = cmds.columnLayout(enable=False)
# How many copies
SpawnObjectsTab.BuildingAmount = cmds.intSliderGrp(label="Building Number", field=True, value=10, min=2, max=50, fieldMaxValue=200)
# Deviation from original rotation
SpawnObjectsTab.RandomRotation = cmds.floatSliderGrp(label="Random Rotation", field=True, value=15, min=0, max=360)
    # Deviation from original scale
SpawnObjectsTab.RandomScale = cmds.floatSliderGrp(label="Random Scale", field=True, value=0, min=0, max=10)
cmds.setParent(mainTab)
### Range spawning parameters
rangeLayout = cmds.columnLayout(visible=False)
# Min x, y and z coordinates
SpawnObjectsTab.MinimumField = cmds.floatFieldGrp(label="Minimum Range: ", numberOfFields=3)
# Max x, y and z coordinates
SpawnObjectsTab.MaximumField = cmds.floatFieldGrp(label="Maximum Range: ", numberOfFields=3)
cmds.setParent(mainTab)
### Finalize
cmds.separator(height=10, style="none")
cmds.button(label='Load Selected Objects', command=lambda x: choosePlacement(x))
cmds.setParent('..') # Exit column layout
return mainTab | 8381058ce0d7607f81fcdc6ba3e9f03c1495c719 | 4,376 |
from typing import List
from typing import Tuple
from sklearn.model_selection import train_test_split
def train_val_test_split(relevant_data: List[str], seed: int = 42) -> Tuple[List[str], List[str], List[str]]:
"""Splits a list in seperate train, validate and test datasets.
TODO: add params for train / val / test sizes
:param relevant_data: The list to be divided, generaly a list of filenames.
:dtype relevant_data: List[str]
"""
relevant_data = sorted(relevant_data) # Ensures the input to the split is always the same
train, rest = train_test_split(relevant_data, test_size=0.3, shuffle=True, random_state=seed) # 70% to train
val, test = train_test_split(rest, test_size=0.5, shuffle=True, random_state=seed) # Divide the remaining 30% equally over val and test
return train, val, test | be8acaabfb9d6ad4043ef82f1899f3a8cbcf7ced | 4,377 |
import math
import numpy as np
"""Calculate the (SHDOM) direction of each pixel.
Directions are calculated in SHDOM convention where the direction is
of the photons.
"""
PHI_shdom = np.pi + np.arctan2(Y_shdom, X_shdom)
PSI_shdom = -np.pi + fov * np.sqrt(X_shdom**2 + Y_shdom**2)
return PHI_shdom, PSI_shdom | e564f5a9988a6a3a0f14b319ea553dd6bfd7d75a | 4,378 |
def twos_comp(val, bits):
"""returns the 2's complement of int value val with n bits
- https://stackoverflow.com/questions/1604464/twos-complement-in-python"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
    return val  # positive values are returned as is | 53bc689f9dd0cf1dff6cd3e073608d1827a3dad9 | 4,379 |
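Worked examples for twos_comp() above, assuming the signed return shown there (8-bit patterns read as signed values):
assert twos_comp(0b11111111, 8) == -1
assert twos_comp(0b01111111, 8) == 127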
def get_batch(source, i, cnf):
"""
Gets a batch shifted over by shift length
"""
seq_len = min(cnf.batch_size, len(source) - cnf.forecast_window - i)
data = source[i : i + seq_len]
target = source[
i + cnf.forecast_window : i + cnf.forecast_window + seq_len
].reshape(-1)
return data, target | 8f4bbfca44bed498dc22d52706389378bf03f7e0 | 4,380 |
def verify_df(df, constraints_path, epsilon=None, type_checking=None,
repair=True, report='all', **kwargs):
"""
Verify that (i.e. check whether) the Pandas DataFrame provided
satisfies the constraints in the JSON ``.tdda`` file provided.
Mandatory Inputs:
*df*:
A Pandas DataFrame, to be checked.
*constraints_path*:
The path to a JSON ``.tdda`` file (possibly
generated by the discover_df function, below)
containing constraints to be checked.
Or, alternatively, an in-memory dictionary
containing the structured contents of a ``.tdda``
file.
Optional Inputs:
*epsilon*:
When checking minimum and maximum values
for numeric fields, this provides a
tolerance. The tolerance is a proportion
of the constraint value by which the
constraint can be exceeded without causing
a constraint violation to be issued.
For example, with epsilon set to 0.01 (i.e. 1%),
values can be up to 1% larger than a max constraint
without generating constraint failure,
and minimum values can be up to 1% smaller
that the minimum constraint value without
generating a constraint failure. (These
are modified, as appropriate, for negative
values.)
If not specified, an *epsilon* of 0 is used,
so there is no tolerance.
NOTE: A consequence of the fact that these
are proportionate is that min/max values
of zero do not have any tolerance, i.e.
the wrong sign always generates a failure.
*type_checking*:
``strict`` or ``sloppy``.
Because Pandas silently, routinely and
automatically "promotes" integer and boolean
columns to reals and objects respectively
if they contain nulls, strict type checking
can be problematical in Pandas. For this reason,
``type_checking`` defaults to ``sloppy``, meaning
that type changes that could plausibly be
attributed to Pandas type promotion will not
generate constraint values.
If this is set to strict, a Pandas ``float``
column ``c`` will only be allowed to satisfy a
an ``int`` type constraint if::
c.dropnulls().astype(int) == c.dropnulls()
Similarly, Object fields will satisfy a
``bool`` constraint only if::
c.dropnulls().astype(bool) == c.dropnulls()
*repair*:
A boolean to specify whether to try to use the
information in the constraints to attempt to
repair potentially-incorrect type inferrences
made when constructing the dataframe. When the
dataframe has been loaded from a .csv file, this
can often be useful (but should not be used with
dataframes that have come from a more reliable
source).
*report*:
``all`` or ``fields``.
This controls the behaviour of the
:py:meth:`~tdda.constraints.pd.constraints.PandasVerification.__str__` method on
the resulting :py:class:`~tdda.constraints.pd.constraints.PandasVerification`
object (but not its content).
The default is ``all``, which means that
all fields are shown, together with the
verification status of each constraint
for that field.
If report is set to ``fields``, only fields for
which at least one constraint failed are shown.
Returns:
:py:class:`~tdda.constraints.pd.constraints.PandasVerification` object.
This object has attributes:
- *passes* --- Number of passing constraints
- *failures* --- Number of failing constraints
It also has a :py:meth:`~tdda.constraints.pd.constraints.PandasVerification.to_frame()` method for
converting the results of the verification to a Pandas DataFrame,
and a :py:meth:`~tdda.constraints.pd.constraints.PandasVerification.__str__` method to print
both the detailed and summary results of the verification.
Example usage::
import pandas as pd
from tdda.constraints import verify_df
df = pd.DataFrame({'a': [0, 1, 2, 10, np.NaN],
'b': ['one', 'one', 'two', 'three', np.NaN]})
v = verify_df(df, 'example_constraints.tdda')
print('Constraints passing: %d\\n' % v.passes)
print('Constraints failing: %d\\n' % v.failures)
print(str(v))
print(v.to_frame())
See *simple_verification.py* in the :ref:`constraint_examples`
for a slightly fuller example.
"""
pdv = PandasConstraintVerifier(df, epsilon=epsilon,
type_checking=type_checking)
if isinstance(constraints_path, dict):
constraints = DatasetConstraints()
constraints.initialize_from_dict(native_definite(constraints_path))
else:
constraints = DatasetConstraints(loadpath=constraints_path)
if repair:
pdv.repair_field_types(constraints)
return pdv.verify(constraints,
VerificationClass=PandasVerification,
report=report, **kwargs) | 03daa527e9edb61d57a960c335ba574930baf130 | 4,381 |
def logical_and(image1, image2):
Logical AND between two images. At least one of the images must have
mode "1".
.. code-block:: python
out = ((image1 and image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im)) | 040ec91a09f0ce7251e3e40f95a087e6d1f81b87 | 4,382 |
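A small usage sketch, assuming this mirrors PIL.ImageChops.logical_and and is given two mode "1" images:

from PIL import Image

a = Image.new("1", (4, 4), 1)   # all-white bilevel image
b = Image.new("1", (4, 4), 0)   # all-black bilevel image
out = logical_and(a, b)         # every pixel is 0, since 1 AND 0 == 0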
def predict(endpoint_id: str, instance: object) -> object:
"""Send a prediction request to a uCAIP model endpoint
Args:
endpoint_id (str): ID of the uCAIP endpoint
instance (object): The prediction instance, should match the input format that the endpoint expects
Returns:
object: Prediction results from the model
"""
return UCAIPService.get().predict_tables(endpoint_id, instance) | 9b7801d23e9aed0fc8292cd1b0a7f9621c313797 | 4,383 |
async def resolve_address(ipaddr, *args, **kwargs):
"""Use a resolver to run a reverse query for PTR records.
See ``dns.asyncresolver.Resolver.resolve_address`` for more
information on the parameters.
"""
return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs) | b5fbd218ba3e8e1d1a51873a4c12cfc304bfc2fd | 4,384 |
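A usage sketch for the coroutine above (assumes dnspython's async resolver machinery, including get_default_resolver(), is available in the same module):

import asyncio

async def main():
    answer = await resolve_address("8.8.8.8")
    for ptr in answer:
        print(ptr.target)   # e.g. dns.google.

asyncio.run(main())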
def _stringify(item):
"""
Private function that wraps all items in quotes to protect from paths
being broken up. It will also unpack lists into strings
:param item: Item to stringify.
:return: string
"""
if isinstance(item, (list, tuple)):
return '"' + '" "'.join(item) + '"'
if isinstance(item, str) and len(item) == 0:
return None
return '"%s"' % item | 7187b33dccce66cb81b53ed8e8c395b74e125633 | 4,385 |
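Behaviour sketch for _stringify:

_stringify(["a b", "c"])          # -> '"a b" "c"'
_stringify("path with spaces")    # -> '"path with spaces"'
_stringify("")                    # -> None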
def _get_tree_filter(attrs, vecvars):
"""
Pull attributes and input/output vector variables out of a tree System.
Parameters
----------
attrs : list of str
Names of attributes (may contain dots).
vecvars : list of str
Names of variables contained in the input or output vectors.
Returns
-------
function
A function that takes a System and returns a list of name value pairs.
"""
def _finder(system):
found = []
for attr in attrs:
parts = attr.split('.') # allow attrs with dots
try:
obj = system
for p in parts:
obj = getattr(obj, p)
found.append((attr, obj))
except AttributeError:
pass
for var in vecvars:
if var in system._outputs:
found.append((var, system._outputs[var]))
elif var in system._inputs:
found.append((var, system._inputs[var]))
return found
return _finder | fb96025a075cfc3c56011f937d901d1b87be4f03 | 4,386 |
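A sketch of using the returned finder; the SimpleNamespace objects below are stand-ins for a real System, not part of the original API:

from types import SimpleNamespace

system = SimpleNamespace(
    options=SimpleNamespace(maxiter=10),
    _inputs={'x': 1.5},
    _outputs={'y': 3.0},
)
finder = _get_tree_filter(['options.maxiter'], ['x', 'y'])
finder(system)   # -> [('options.maxiter', 10), ('x', 1.5), ('y', 3.0)]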
def gradient2(Y,x,sum_p):
"""
Description
-----------
Used to calculate the gradients of the beta values (excluding the first).
Parameters
----------
Y: label (0 or 1)
x: flux value
sum_p: sum of all beta values (see 'param_sum' function)
Returns
-------
num/denom: gradient value
"""
if Y == 1:
num = -x * np.exp(-sum_p)
denom = 1 + np.exp(-sum_p)
elif Y == 0:
num = x
denom = 1 + np.exp(-sum_p)
return num/denom | 985b0e72419127291b2ddf99e81a129693e7e8fe | 4,387 |
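A worked example of the gradient for the Y == 1 branch (assuming numpy is imported as np in the module, as the function requires):

# With Y = 1, x = 2.0 and sum_p = 0.0 (predicted probability 0.5):
#   num   = -2.0 * exp(0) = -2.0
#   denom = 1 + exp(0)    =  2.0
gradient2(1, 2.0, 0.0)   # -> -1.0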
def takeBlock2(aList, row_list, col_list):
"""
Take the sub-block given by the rows in row_list and the columns in col_list from
a doubly nested (2-D) list.
The convention for the index of the rows and columns are the same as in slicing.
"""
result = []
for row in row_list:
result.append([aList[row][column] for column in col_list])  # list comprehension works on both Python 2 and 3
return result | 4f891fd0ce8b3bcca88f2f8f6f572ce4253d1d46 | 4,388 |
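Example of extracting a sub-block:

grid = [[1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]]
takeBlock2(grid, [0, 2], [1, 2])   # -> [[2, 3], [8, 9]]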
def param(key, desired_type=None):
"""Return a decorator to parse a JSON request value."""
def decorator(view_func):
"""The actual decorator"""
@wraps(view_func)
def inner(*args, **kwargs):
data = request.get_json() # May raise a 400
try:
value = data[key]
except (KeyError, TypeError):
abort(400, "Missing JSON value '{0}'.".format(key))
if desired_type and not isinstance(value, desired_type):
# For the error message
if desired_type == text_type:
type_name = 'string'
else:
type_name = desired_type.__name__
abort(400, ("Expected '{0}' to be type {1}."
.format(key, type_name)))
# Success, pass through to view function
kwargs[key] = value
return view_func(*args, **kwargs)
return inner
return decorator | 1dca83eb24df9623ddae270ebe1f06461c372af0 | 4,389 |
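A hypothetical Flask view using the decorator (the route and field name are illustrative; the snippet's module is assumed to already import request, abort, wraps and text_type):

from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/greet", methods=["POST"])
@param("name", str)
def greet(name):
    # A POST body of {"name": "Ada"} arrives here as name="Ada";
    # a missing or non-string "name" is rejected with a 400.
    return jsonify(message="Hello, {0}!".format(name))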
def make_client(instance):
"""Returns a client to the ClientManager."""
tacker_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating tacker client: %s', tacker_client)
kwargs = {'service_type': 'nfv-orchestration',
'region_name': instance._region_name,
'endpoint_type': instance._interface,
'interface': instance._interface,
'session': instance.session
}
client = tacker_client(**kwargs)
return client | 554394a29978b13523532f55d858f25ae1a17619 | 4,390 |
def rasterize(
vectors,
layer=0,
output=None,
nodata=None,
pixel_size=None,
bounds=None,
affine=None,
shape=None,
attribute=None,
fill=0,
default_value=1,
):
"""Rasterize features
Options for definining the boundary and pixel size of rasterization:
User may provide
1) pixel_size only - uses full boundary of features
2) pixel size and bounds - limits features to given boundary
3) affine and shape - both required to determine boundary
Providing output path is optional. Only needed if you want to save
rasterized feature(s) to a GeoTiff
rasterio features rasterization function:
https://rasterio.readthedocs.io/en/latest/topics/features.html
https://rasterio.readthedocs.io/en/latest/api/rasterio.features.html
TODO:
could also use lookup dict with attribute arg for non-binary rasters
where attribute value is not numeric
Args
vectors:
features input, see rasterstats for acceptable inputs
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vectors layer to use either by name or number.
defaults to 0
output (str): (optional)
output path for raster of rasterized features
nodata: (optional)
nodata value used if output argument is provided
pixel_size (float):
resolution at which to rasterize features
bounds (tuple):
boundary tuple (xmin, ymin, xmax, ymax)
affine (Affine):
affine transformation used for rasterization
shape (tuple):
shape for rasterization which corresponds with affine (nrows, ncols)
attribute (str):
field to use for assigning cell values instead of `default_value`
fill (int, float):
same as rasterio's features.rasterize `fill`
default_value (int, float):
same as rasterio's features.rasterize `default_value`
Returns
array representing rasterized features
affine of the resulting raster
"""
if (
affine is not None
and isinstance(affine, Affine)
and shape is not None
and isinstance(shape, tuple)
and len(shape) == 2
):
if pixel_size is not None and pixel_size != affine[0]:
warn("Ignoring `pixel_size` provided due to valid affine and shape input.")
if pixel_size is not None and bounds is not None:
alt_affine, alt_shape = get_affine_and_shape(
bounds=bounds, pixel_size=pixel_size
)
if alt_affine != affine or alt_shape != shape:
warn("Ignoring `bounds` due to valid affine and shape input")
elif pixel_size is not None and bounds is not None:
affine, shape = get_affine_and_shape(bounds=bounds, pixel_size=pixel_size)
else:
raise Exception("Must provide either pixel_size and bounds or affine and shape")
features_iter = read_features(vectors, layer)
if attribute is None:
feats = [
(feat["geometry"], default_value)
for feat in features_iter
if feat["geometry"] is not None
]
else:
feats = [
(feat["geometry"], feat["properties"][str(attribute)])
for feat in features_iter
if feat["geometry"] is not None
]
rv_array = features.rasterize(
feats,
out_shape=shape,
transform=affine,
fill=fill,
default_value=default_value,
all_touched=True,
dtype=None,
)
if output is not None:
export_raster(rv_array, affine, output, nodata=nodata)
return rv_array, affine | a8f47fd768f8173c74605f533c1a50974b6acc63 | 4,391 |
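A usage sketch that burns a single polygon into a 10 x 10 grid (the feature and its "value" property are illustrative, and this assumes read_features accepts GeoJSON-like mappings):

feature = {
    "type": "Feature",
    "properties": {"value": 3},
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[0, 0], [5, 0], [5, 5], [0, 5], [0, 0]]],
    },
}
arr, affine = rasterize(
    [feature],
    pixel_size=1.0,
    bounds=(0, 0, 10, 10),
    attribute="value",   # cells under the polygon get 3, the rest keep fill=0
)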
def blockchain_key_seed(request):
""" Private key template for the nodes in the private blockchain, allows
different keys to be used for each test to avoid collisions.
"""
# Using the test name as part of the template to force the keys to be
# different across tests, otherwise the data directories would be the same
# and collisions would happen
return escape_for_format(request.node.name) + "cluster:{}" | 05a940c5a18f816b4bba3fb65e354a5dac2ce1cd | 4,392 |
def wls_simple(X, y, yerr):
"""
weighted least squares: (X.T*W*X)*beta = X.T*W*y
solution: beta = (X.T*W*X)^-1 * X.T*W*y
Note
----
wls solves a single problem at a time (n_problems=1),
but it can also solve multiple problems (columns of y) that share the same error vector
Parameters
----------
X: predictors
(n_obs, n_term) array
yerr: error of response
(n_obs, ) weight matrix
y: response
(n_obs, n_problems) array
Return
------
beta: coefficients
(n_term, ) array
"""
yerr = yerr.reshape(-1, 1) # column vector
yerr = np.where((yerr > 0) & np.isfinite(yerr), yerr, 1e5)
X_ = X / yerr
y_ = y / yerr
beta = ols(np.matmul(X_.T, X_), np.matmul(X_.T, y_))
return beta | d428eb22dee7b587788e065a7dd883992f183ef7 | 4,393 |
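A sketch of calling wls_simple, assuming the module-level ols(A, b) helper solves the square system A @ beta = b (e.g. via np.linalg.solve):

import numpy as np

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.normal(size=50)])   # (n_obs, n_term)
beta_true = np.array([[1.0], [2.0]])
yerr = np.full(50, 0.5)
y = X @ beta_true + rng.normal(scale=0.5, size=(50, 1))
beta = wls_simple(X, y, yerr)   # close to [[1.0], [2.0]]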
import re
import copy
def _filter(dict_packages, expression):
"""Filter the dict_packages with expression.
Returns:
dict(rst): Filtered dict with the keys that match the expression.
"""
expression_list = ['(' + item + ')' for item in expression.split(',')]
expression_str = '|'.join(expression_list)
compiled_exp = re.compile('(?i:^(' + expression_str + ')$)')
cp_dict_packages = copy.deepcopy(dict_packages)
for key in dict_packages.keys():
match = re.search(compiled_exp, key)
if not match:
del cp_dict_packages[key]
return cp_dict_packages | 866bca2847b9d3c8220319f4b394f932931fc076 | 4,394 |
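Behaviour sketch: the expression is a comma-separated list of case-insensitive full-match patterns applied to the keys:

packages = {"NumPy": "1.24", "numpydoc": "1.5", "SciPy": "1.10"}
_filter(packages, "numpy,scipy")   # -> {"NumPy": "1.24", "SciPy": "1.10"}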
def multi_index_tsv_to_dataframe(filepath, sep="\t", header_rows=None):
"""
Loads a multi-header tsv file into a :py:class:`pd.DataFrame`.
Parameters
----------
filepath : `str`
Path pointing to the tsv file.
sep : `str`, optional, default: '\t'
Character to use as the delimiter.
header_rows : `list`, optional, default: None
0-based indices corresponding to the row locations to use as the
multi-index column names in the dataframe. Example:
condition E3 E3
value pvalue_raw z
_sy 8.6e-05 3.92
p.Ala16Arg 0.0 3.76
The *header_rows* for this instance will be [0, 1]
If not supplied, `header_rows` will be inferred from the file.
Returns
-------
:py:class:`~pd.DataFrame`
A :py:class:`pd.MultiIndex` dataframe.
"""
if header_rows is None:
header_rows = infer_multiindex_header_rows(filepath)
if header_rows == [0] or not header_rows:
return pd.read_table(filepath, index_col=0, sep=sep)
else:
try:
return pd.read_table(filepath, index_col=0, sep=sep, header=header_rows)
except IndexError:
return pd.read_table(filepath, index_col=0, sep=sep) | 9ae5816aed2bfacd05d4130ccc1598c037b9b353 | 4,395 |
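A usage sketch (the file name is illustrative; the column labels match the docstring example):

df = multi_index_tsv_to_dataframe("scores.tsv", header_rows=[0, 1])
df[("E3", "z")]   # select a column by its (condition, value) header pair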
def generate_summoner_tab_summoner(db, profile, ss):
"""
:type db: darkarisulolstats.lolstats.database.Database
"""
summoner = {}
for s in ss:
raw_summoner = db.summoners.get(s)
if "profileIconPath" not in summoner:
summoner["profileIconPath"] = data.DataDragon.get_profile_icon_path(raw_summoner["profileIconId"])
summoner["name"] = profile
if "level" not in summoner:
summoner["level"] = raw_summoner["summonerLevel"]
else:
summoner["level"] += raw_summoner["summonerLevel"]
summoner["Playtime"] = 0
raw_matches = db.preprocessed.get(profile, "matchlists")
for raw_match in raw_matches:
summoner["Playtime"] += raw_match["duration"]
return summoner | 0bee2b48c71910ed273925a7cae1c4539b411401 | 4,396 |
def preserve_quotes (s):
"""
Removes HTML tags around greentext.
"""
return quot_pattern.sub(get_first_group, s) | a87ac4ee7fdb0e0c879047066e805f2c9382c599 | 4,397 |
import os
import configparser
import logging
def test_init_logger(monkeypatch):
"""
Tests `init_logger()`.
"""
test_config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'test_config')
def mock_get_conf_path():
"""
Replaces the conf path with the one for mock confs in unit tests.
"""
return test_config_dir
monkeypatch.setattr(dirs, 'get_conf_path', mock_get_conf_path)
config.init_logger()
logger_conf_file = os.path.join(test_config_dir, 'logger.conf')
logger_cp = configparser.RawConfigParser()
logger_cp.read(logger_conf_file)
assert config.find_existing_handler_from_config(
logger_cp, 'fileHandler') is not None
assert config.find_existing_handler_from_config(
logger_cp, 'stdoutHandler') is not None
assert config.find_existing_handler_from_config(
logger_cp, 'stderrHandler') is not None
assert config.find_existing_handler_from_config(
logger_cp, 'disabledHandler') is not None
stdout_handler = config.find_existing_handler_from_config(
logger_cp, 'stdoutHandler')
assert stdout_handler is not None
assert stdout_handler.filters[0]._max_inc_levelno \
== logging.INFO # pylint: disable=protected-access
root_logger = logging.getLogger()
def clear_handlers():
"""
Clears all handlers from the root logger.
"""
existing_handlers = root_logger.handlers
for h_existing in existing_handlers:
root_logger.removeHandler(h_existing)
clear_handlers()
config.init_logger('VeRBoSe')
# Since level changed, cannot use existing function
# Only matching on format -- expected to be unique in this mock conf
for h_existing in root_logger.handlers:
h_conf = logger_cp['handler_stdoutHandler']
h_conf_fmt = logger_cp[ \
f'formatter_{h_conf["formatter"]}']['format'].strip()
if h_existing.formatter._fmt \
!= h_conf_fmt: # pylint: disable=protected-access
continue
stdout_handler = h_existing
break
assert stdout_handler.level == logging.NOTSET
clear_handlers()
config.init_logger(40)
# Since level changed, cannot use existing function
# Only matching on format -- expected to be unique in this mock conf
for h_existing in root_logger.handlers:
h_conf = logger_cp['handler_stderrHandler']
h_conf_fmt = logger_cp[ \
f'formatter_{h_conf["formatter"]}']['format'].strip()
if h_existing.formatter._fmt \
!= h_conf_fmt: # pylint: disable=protected-access
continue
stderr_handler = h_existing
break
assert stderr_handler.level == 40 | 5beb83bfd4b858b50e2572c2907c0536193b30bb | 4,398 |
def with_whitespace_inside(expr):
""" Returns an expression that allows for whitespace inside, but not
outside the expression.
"""
return Combine(OneOrMore(expr | White(' ', max=1) + expr)) | 9306ffb73277d249062ffca45ded9d0bd9a45e3c | 4,399 |
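A small pyparsing sketch of the combinator in use:

from pyparsing import Word, alphas

inner = with_whitespace_inside(Word(alphas))
inner.parseString("foo bar baz")[0]   # -> 'foo bar baz' as a single combined token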