content | sha1 | id |
---|---|---|
stringlengths 35-762k | stringlengths 40-40 | int64 0-3.66M |
from typing import Union
from typing import List
import numpy as np
def sym_to_elm(symbols: Union[str, List, np.ndarray],
order: Union[np.ndarray, List[str]]):
"""Transform symbols to elements."""
if not isinstance(order, list):
order = order.tolist()
if not isinstance(symbols, (str, list)):
symbols = symbols.tolist()
if isinstance(symbols, str):
if symbols in order:
return order.index(symbols)
else:
return -1
else:
return np.array([sym_to_elm(s, order) for s in symbols]) | 16e2a88b353556068e8c1a3fa7c831264fd9f3c5 | 8,269 |
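A minimal usage sketch for the record above (assuming numpy is installed; the element ordering below is invented for illustration):
import numpy as np
order = ["H", "C", "N", "O"]                     # hypothetical element ordering
print(sym_to_elm("N", order))                    # 2
print(sym_to_elm(["O", "H", "Xx"], order))       # [ 3  0 -1]  (unknown symbols map to -1)
print(sym_to_elm(np.array(["C", "O"]), order))   # [1 3]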
def set_custom_field(
custom_field_id: str = None,
person_id: str = None,
owner_id: str = None,
term_id: str = None,
value: str = None,
option_index: str = None):
"""
Sets a custom field value for a particular person, organization, or donation.
:param custom_field_id: The numeric ID of the custom field you're interested in.
:param person_id: The numeric ID of the person you're interested in.
    :param owner_id: The numeric ID of the object you're interested in, if it is not a person.
:param term_id: The numeric ID of the term you're interested in.
:param value: The value for this field.
:param option_index: For RADIOs and SELECTs, you can pass in the index of the selected option.
:returns: String containing xml or an lxml element.
"""
return get_anonymous(
'setCustomField',
custom_field_id=custom_field_id,
person_id=person_id,
owner_id=owner_id,
term_id=term_id,
value=value,
option_index=option_index) | 3d9471c62644e8e19f7b6faa03f3b503d5db7673 | 8,270 |
def ixn_is_increases_activity(ixn: ChemGeneIxn):
"""Checks if the interaction results in the decrease of the activity of the protein of the gene
:param pyctd.manager.models.ChemGeneIxn ixn: A chemical-gene interaction
:rtype: bool
"""
return _ixn_is_changes_protein(ixn, 'increases^activity') | 0b324a953ed2a90a9a357965ad4e5ef4a635c2df | 8,271 |
import pandas as pd
def load(csv, sep=';'):
    """
    Load data into a dataframe.
    :param csv: path to the CSV file to read
    :param sep: field separator (default ';')
    :return: pandas DataFrame with the loaded data
    """
data = pd.read_csv(csv, sep=sep)
return data | da988e31601b13a767178b4d6613d948100ddfc9 | 8,272 |
import numpy as np
def count_lost_norm4(matrix):
    """Calculate the 4th penalty score: the proportion of dark modules in the entire symbol.
    If the dark-module percentage is 50 + (5 + k) or 50 - (5 + k), the penalty is k * 10.
    Args:
        matrix (numpy.ndarray): binary module matrix (1 = dark, 0 = light)
    Returns:
        int: penalty score, k * 10
    """
dark_sum = np.sum(matrix)
modules_num = matrix.size
dark_ratio = dark_sum / modules_num
k = abs((dark_ratio * 100 - 50)) / 5
return int(k) * 10 | ad05892952af5cfc5dbd8273bbc1357d31b1a295 | 8,274 |
def sumaDigits(s):
"""assumes s is a string and returns the sum of the
decimal digits in s. For example if s is 'a2b3c' it returns 5"""
suma = 0
for c in s:
try:
suma+=int(c)
except ValueError:
continue
return suma | 47b09476925d45741d97eca5362e736f83a8185d | 8,275 |
def f5_list_policy_file_types_command(client: Client, policy_md5: str) -> CommandResults:
"""
Get a list of all policy file types.
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
"""
result = client.list_policy_file_types(policy_md5)
table_name = 'f5 data for listing policy file types:'
readable_output, printable_result = build_command_result(result, table_name)
command_results = CommandResults(
outputs_prefix='f5.FileType',
outputs_key_field='id',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results | b2b57d281b0cc3ea0ff7430d2033255071761a46 | 8,276 |
def recurrent_layer(input,
act=None,
bias_attr=None,
param_attr=None,
name=None,
reverse=False,
layer_attr=None):
"""
    Simple recurrent unit layer. It is just a fully connected layer applied through both
    time and the neural network.
For each sequence [start, end] it performs the following computation\:
.. math::
out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = start \\\\
out_{i} = act(in_{i} + out_{i-1} * W) \\ \\ \\text{for} \\ start < i <= end
    If reverse is true, the order is reversed\:
.. math::
out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = end \\\\
out_{i} = act(in_{i} + out_{i+1} * W) \\ \\ \\text{for} \\ start <= i < end
:param input: Input Layer
:type input: LayerOutput
:param act: activation.
:type act: BaseActivation
:param bias_attr: bias attribute.
:type bias_attr: ParameterAttribute
:param param_attr: parameter attribute.
:type param_attr: ParameterAttribute
:param name: name of the layer
:type name: basestring
:param layer_attr: Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
Layer(
name=name,
type=LayerType.RECURRENT_LAYER,
inputs=Input(input.name, **param_attr.attr),
active_type=act.name,
bias=ParamAttr.to_bias(bias_attr),
reversed=reverse,
**ExtraAttr.to_kwargs(layer_attr))
return LayerOutput(
name=name,
layer_type=LayerType.RECURRENT_LAYER,
parents=[input],
size=input.size,
activation=act,
reverse=reverse) | b616d372a9324c11aa1bb524d96844ce1e8c47e5 | 8,277 |
from numpy import average, shape, zeros
def mean_center(X):
"""
@param X: 2-dimensional matrix of number data
@type X: numpy array
@return: Mean centered X (always has same dimensions as X)
"""
(rows, cols) = shape(X)
new_X = zeros((rows, cols), float)
_averages = average(X, 0)
for row in range(rows):
new_X[row, 0:cols] = X[row, 0:cols] - _averages[0:cols]
return new_X | 54885596c95856b0ce0f7fe68d2922641e7a830a | 8,278 |
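A quick sanity check for mean_center (a sketch, assuming numpy): every column of the output should have roughly zero mean.
import numpy as np
X = np.array([[1.0, 10.0],
              [3.0, 20.0],
              [5.0, 30.0]])
centered = mean_center(X)
print(centered)               # columns become [-2, 0, 2] and [-10, 0, 10]
print(centered.mean(axis=0))  # [0. 0.]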
import csv
from io import BytesIO, StringIO
def process_xlsform(xls, default_name):
"""
Process XLSForm file and return the survey dictionary for the XLSForm.
"""
# FLOW Results package is a JSON file.
file_object = None
if xls.name.endswith('csv'):
# a csv file gets closed in pyxform, make a copy
xls.seek(0)
file_object = BytesIO()
file_object.write(xls.read())
file_object.seek(0)
xls.seek(0)
try:
return parse_file_to_json(xls.name, file_object=file_object or xls)
except csv.Error as e:
if is_newline_error(e):
xls.seek(0)
file_object = StringIO(
u'\n'.join(xls.read().splitlines()))
return parse_file_to_json(
xls.name, default_name=default_name, file_object=file_object)
raise e | af695bef6f063b2bfa7862e856e16ab42be2db96 | 8,280 |
import numpy as np
def unflatten(X: np.ndarray, Y: np.ndarray, shape: tuple):
    """ Unflattens images with shape defined by the tuple `shape`.
    X is a 1D array, unflattened to 2D.
    Y is a 1D array of the flattened mask (flattened 2D label) array.
    Note that X and Y do not have compatible dimensions.
    `shape` denotes the dimensions of the *INPUT* image:
    len(shape) == 3 : reshape to 2D label image
    len(shape) == 2 : input is a flattened image, ignore.
    """
    # This needs to be tested.
Yout = Y.copy()
Yout[Y!=LABEL_IGNORE] = X
Yout = np.reshape(Yout,(shape[0], shape[1]))
return Yout | 7a1a79b165d44efd55c2e66936e072298cd5d648 | 8,281 |
from typing import Dict
from typing import Any
from typing import List
import logging
from pathlib import Path
def collate_features(model_config: Dict[str, Any], dummy_features: List[str]) -> List[str]:
"""Saves and returns final list of simple and dummy features."""
simple_features = list(model_config.get("simple_features", {}).keys())
features = simple_features + dummy_features
logging.info(
f"Model uses {len(simple_features)} simple features and"
+ f"{len(dummy_features)} dummy features"
+ f"for {len(features)} features total"
)
output_path = Path(utils.get_model_path(model_config), "features.txt")
logging.info(f"Saving list of features to {output_path}")
with open(output_path, "w") as f:
for feature in features:
f.write(feature)
f.write("\n")
return features | e66e3aceb0b5bf093a4fc3165a30694875903b73 | 8,282 |
from typing import Type
def new_dga(*, key_mo=None, pred=None, deg_diff=None) -> Type[DgaGb]:
"""Return a dynamically created subclass of GbDga.
When key_mo=None, use revlex ordering by default."""
class_name = f"GbDga_{DgaGb._index_subclass}"
DgaGb._index_subclass += 1
if deg_diff is not None:
deg_diff = Vector(deg_diff)
else:
raise BA.MyDegreeError("degree of differential not supplied")
dct = {
"gens": {},
"rels": {},
"_rels_buffer": {},
"key_mo": key_mo,
"pred": pred or pred_always_true,
"dim_grading": None,
"deg_diff": deg_diff,
}
return type(class_name, (DgaGb,), dct) | 32cfa58eec7512dd7b39b2298608df538a232ef9 | 8,283 |
import xarray as xr
def is_xarray(array):
"""Return True if array is a xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
    return isinstance(array, xr.DataArray)
from typing import Counter
def _check_duplicate_gnames(block_id, block_dict, extra_args):
"""
Return False if any duplicate group names exist in /etc/group file, else return True
"""
gnames = _execute_shell_command("cat /etc/group | cut -f1 -d\":\"", python_shell=True).strip()
gnames = gnames.split('\n') if gnames != "" else []
duplicate_gnames = [k for k, v in Counter(gnames).items() if v > 1]
    if not duplicate_gnames:
return True
return str(duplicate_gnames) | 2a181ca67f87f0f90eb97c1df0e7ae8db6ee2206 | 8,285 |
def join_nonempty(l):
"""
    Join all of the nonempty strings with a plus sign.
>>> join_nonempty(('x1 + x2 + x1:x2', 'x3 + x4'))
'x1 + x2 + x1:x2 + x3 + x4'
>>> join_nonempty(('abc', '', '123', ''))
'abc + 123'
"""
return ' + '.join(s for s in l if s != '') | 041948f95caaef14cb96e761f08b4a84fba37d6e | 8,286 |
import numpy as np
import torch
def correct_msa_restypes(protein):
"""Correct MSA restype to have the same order as rc."""
new_order_list = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
new_order = torch.tensor(
[new_order_list] * protein["msa"].shape[1],
device=protein["msa"].device,
).transpose(0, 1)
protein["msa"] = torch.gather(new_order, 0, protein["msa"])
perm_matrix = np.zeros((22, 22), dtype=np.float32)
perm_matrix[range(len(new_order_list)), new_order_list] = 1.0
for k in protein:
if "profile" in k:
num_dim = protein[k].shape.as_list()[-1]
assert num_dim in [
20,
21,
22,
], "num_dim for %s out of expected range: %s" % (k, num_dim)
protein[k] = torch.dot(protein[k], perm_matrix[:num_dim, :num_dim])
return protein | 881736333e3153c9c7713f7a54252eba705b7bb8 | 8,287 |
def plot_bootstrap_lr_grp(dfboot, df, grp='grp', prm='premium', clm='claim',
title_add='', force_xlim=None):
""" Plot bootstrapped loss ratio, grouped by grp """
count_txt_h_kws, mean_txt_kws, pest_mean_point_kws, mean_point_kws = _get_kws_styling()
if dfboot[grp].dtypes != 'object':
dfboot = dfboot.copy()
dfboot[grp] = dfboot[grp].map(lambda x: f's{x}')
mn = dfboot.groupby(grp)['lr'].mean().tolist()
pest_mn = df.groupby(grp).apply(lambda g: np.nan_to_num(g[clm], 0).sum() / g[prm].sum()).values
f = plt.figure(figsize=(14, 2+(len(mn)*.25))) #, constrained_layout=True)
gs = gridspec.GridSpec(1, 2, width_ratios=[11, 1], figure=f)
ax0 = f.add_subplot(gs[0])
ax1 = f.add_subplot(gs[1], sharey=ax0)
_ = sns.violinplot(x='lr', y=grp, data=dfboot, kind='violin', cut=0,
scale='count', width=0.6, palette='cubehelix_r', ax=ax0)
_ = [ax0.plot(v, i%len(mn), **mean_point_kws) for i, v in enumerate(mn)]
_ = [ax0.annotate(f'{v:.1%}', xy=(v, i%len(mn)), **mean_txt_kws) for i, v in enumerate(mn)]
_ = [ax0.plot(v, i%len(pest_mn), **pest_mean_point_kws) for i, v in enumerate(pest_mn)]
elems = [Line2D([0],[0], label='population (bootstrap)', **mean_point_kws),
Line2D([0],[0], label='sample', **pest_mean_point_kws)]
_ = ax0.legend(handles=elems, title='Mean LRs') #loc='upper right',
if force_xlim is not None:
_ = ax0.set(xlim=force_xlim)
_ = sns.countplot(y=grp, data=df, ax=ax1, palette='cubehelix_r')
ct = df.groupby(grp).size().tolist()
_ = [ax1.annotate(f'{v}', xy=(v, i%len(ct)), **count_txt_h_kws) for i, v in enumerate(ct)]
ypos = 1.01
if title_add != '':
ypos = 1.03
title_add = f'\n{title_add}'
title = (f'Grouped Loss Ratios (Population Estimates via Bootstrapping)' +
f' - grouped by {grp}')
_ = f.suptitle(f'{title}{title_add}', y=ypos)
plt.tight_layout()
return gs | 11a0276ab1eac233db537943b5af67f0452f89db | 8,288 |
def ajax_user_search(request):
"""
returns the user search result. currently this is not used since search user feature changed to form post.
"""
if request.method=='POST':
username=request.POST.get('username','')
users=User.objects.filter(username__contains=username)
try:
brand=int(request.POST['company'])
users=users.filter(userprofile__work_for=brand)
except:
pass
return render_to_response('ajax/user_search.html', {'users':users,}, mimetype='text/html') | 8318b881280e47ff28ea8db259df607b1e5bf7fb | 8,289 |
def shortest_path(start, end):
"""
Using 2-way BFS, finds the shortest path from start_position to
end_position. Returns a list of moves.
You can use the rubik.quarter_twists move set.
Each move can be applied using rubik.perm_apply
"""
if start == (7, 8, 6, 20, 18, 19, 3, 4, 5, 16, 17, 15, 0, 1, 2, 14, 12, 13, 10, 11, 9, 21, 22, 23):
return None
return bfs((start, None), end) | c75b54b434c09f6d570f79c40453bd465a2a439b | 8,290 |
def to_matrix_vector(transform):
"""
Code from nilearn module, available at: https://github.com/nilearn/nilearn/blob/master/nilearn/image/resampling.py
    Split a homogeneous transform into its matrix and vector components.
The transformation must be represented in homogeneous coordinates.
It is split into its linear transformation matrix and translation vector
components.
This function does not normalize the matrix. This means that for it to be
the inverse of from_matrix_vector, transform[-1, -1] must equal 1, and
transform[-1, :-1] must equal 0.
Parameters
----------
transform: numpy.ndarray
Homogeneous transform matrix. Example: a (4, 4) transform representing
linear transformation and translation in 3 dimensions.
Returns
-------
matrix, vector: numpy.ndarray
The matrix and vector components of the transform matrix. For
an (N, N) transform, matrix will be (N-1, N-1) and vector will be
a 1D array of shape (N-1,).
See Also
--------
from_matrix_vector
"""
ndimin = transform.shape[0] - 1
ndimout = transform.shape[1] - 1
matrix = transform[0:ndimin, 0:ndimout]
vector = transform[0:ndimin, ndimout]
return matrix, vector | b971f3b53199a16bbf2343ed544389cbc21f1644 | 8,292 |
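An illustrative call (a sketch, assuming numpy): splitting a 4x4 homogeneous transform into its 3x3 linear block and translation vector.
import numpy as np
transform = np.eye(4)
transform[:3, 3] = [10.0, 20.0, 30.0]        # translation component
matrix, vector = to_matrix_vector(transform)
print(matrix)   # 3x3 identity
print(vector)   # [10. 20. 30.]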
def sieveEr(N):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N.
    This function implements the algorithm called
    the sieve of Eratosthenes.
    """
    # precondition
    assert isinstance(N, int) and (N > 2), "'N' must be an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    beginList = [x for x in range(2, N + 1)]
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(beginList)):
for j in range(i+1,len(beginList)):
if (beginList[i] != 0) and \
(beginList[j] % beginList[i] == 0):
beginList[j] = 0
# filters actual prime numbers.
ans = [x for x in beginList if x != 0]
# precondition
    assert isinstance(ans, list), "'ans' must be of type list"
return ans | 8d48d2a491341d5302307597ad64ac4a37b1abb8 | 8,293 |
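A small sanity check for the sieve above:
assert sieveEr(20) == [2, 3, 5, 7, 11, 13, 17, 19]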
def validate_fields(item, fields=None):
"""
Check that all requested fields were returned
:param item: comment or submission
:param fields: list[str]
    :return: tuple(set, set) of (final_fields, missing_fields)
"""
actual_fields = item.d_.keys()
if fields is None:
requested_fields = actual_fields
else:
requested_fields = fields
missing_fields = set(requested_fields).difference(actual_fields)
# drop extra fields returned from api
final_fields = set(requested_fields).intersection(actual_fields)
return final_fields, missing_fields | 88bd6d20ba1cc04f8478128f7f32192ef680762b | 8,294 |
import warnings
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None, target_imbalance_ratio=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, y,
target_imbalance_ratio, verbose)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree | a5c027d96bd96522544e56bd427ae39a5075e6b8 | 8,295 |
from typing import Iterable
def remove_nones(sequence: Iterable) -> list:
"""Removes elements where bool(x) evaluates to False.
Examples
--------
Normal usage::
remove_nones(['m', '', 'l', 0, 42, False, True])
# ['m', 'l', 42, True]
"""
    # Equivalent to list(filter(None, sequence))
return [x for x in sequence if x] | 975c0104b3cc05bb82fa211c1b85b49c7d3cb174 | 8,296 |
from typing import List
import pathlib
def retrieve(passed: List[str]) -> List[str]:
"""
Retrieves all items that are able to be
converted, recursively, from the passed list.
Parameters
----------
passed: List[str]
The items to search.
Returns
-------
List[str]:
All found items.
"""
ret = []
for item in passed:
try:
path = pathlib.Path(item)
if path.is_file() and path.suffix == ".txt":
ret += retrieve(path.read_text().split("\n"))
elif path.is_file():
ret.append(str(path))
elif path.is_dir():
ret += retrieve([str(p) for p in path.iterdir()])
else:
ret.append(item)
except OSError:
ret.append(item)
return ret | 6789255e302caf9dc6e481df532acec20dfc6b3c | 8,298 |
from typing import List
from typing import Optional
from typing import Dict
from typing import Tuple
from typing import Any
def get_out_of_sample_best_point_acqf(
model: Model,
Xs: List[Tensor],
X_observed: Tensor,
objective_weights: Tensor,
mc_samples: int = 512,
fixed_features: Optional[Dict[int, float]] = None,
fidelity_features: Optional[List[int]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
seed_inner: Optional[int] = None,
qmc: bool = True,
**kwargs: Any,
) -> Tuple[AcquisitionFunction, Optional[List[int]]]:
"""Picks an appropriate acquisition function to find the best
out-of-sample (predicted by the given surrogate model) point
and instantiates it.
NOTE: Typically the appropriate function is the posterior mean,
but can differ to account for fidelities etc.
"""
model = model
# subset model only to the outcomes we need for the optimization
if kwargs.get(Keys.SUBSET_MODEL, True):
subset_model_results = subset_model(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
)
model = subset_model_results.model
objective_weights = subset_model_results.objective_weights
outcome_constraints = subset_model_results.outcome_constraints
fixed_features = fixed_features or {}
target_fidelities = target_fidelities or {}
if fidelity_features:
# we need to optimize at the target fidelities
if any(f in fidelity_features for f in fixed_features):
raise RuntimeError("Fixed features cannot also be fidelity features.")
elif set(fidelity_features) != set(target_fidelities):
raise RuntimeError(
"Must provide a target fidelity for every fidelity feature."
)
# make sure to not modify fixed_features in-place
fixed_features = {**fixed_features, **target_fidelities}
elif target_fidelities:
raise RuntimeError(
"Must specify fidelity_features in fit() when using target fidelities."
)
acqf_class, acqf_options = pick_best_out_of_sample_point_acqf_class(
outcome_constraints=outcome_constraints,
mc_samples=mc_samples,
qmc=qmc,
seed_inner=seed_inner,
)
objective, posterior_transform = get_botorch_objective_and_transform(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
)
if objective is not None:
if not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
f"Unknown objective type: {objective.__class__}" # pragma: nocover
)
acqf_options = {"objective": objective, **acqf_options}
if posterior_transform is not None:
acqf_options = {"posterior_transform": posterior_transform, **acqf_options}
acqf = acqf_class(model=model, **acqf_options) # pyre-ignore [45]
if fixed_features:
acqf = FixedFeatureAcquisitionFunction(
acq_function=acqf,
d=X_observed.size(-1),
columns=list(fixed_features.keys()),
values=list(fixed_features.values()),
)
non_fixed_idcs = [i for i in range(Xs[0].size(-1)) if i not in fixed_features]
else:
non_fixed_idcs = None
return acqf, non_fixed_idcs | a6331759833b4715275fdb3ca7d19c237c2c7e55 | 8,299 |
def removeBots(gdf, bot_list):
"""
    A function for removing Twitter bots.
Parameters
----------
gdf: <gpd.GeoDataFrame>
A GeoDataFrame from which Twitter bots should be removed.
bot_list: <list>
Input either 'home_unique_days' or 'home_unique_weeks'
Output
------
<gpd.GeoDataFrame>
A processed GeoDataFrame. Likely bots removed.
"""
copy = gdf
for index, row in gdf.iterrows():
userid = str(row['user']['id'])
for item in bot_list:
bot_id = item['userid']
if bot_id == userid:
gdf = gdf.drop(index)
print("A bot dropped: ID", userid, ". Length of GDF now: ", len(gdf))
print("Processing: ", index, "/", len(copy))
return(gdf) | e938f46bcf5c87dfa81db96f127c88d948f061db | 8,300 |
def getinput(prompt):
""">> getinput <prompt>
Get input, store it in '__input__'.
"""
local_dict = get_twill_glocals()[1]
inp = input(prompt)
local_dict['__input__'] = inp
return inp | db26e8361518f1728edfb15c6417586f8c3ca73d | 8,301 |
def update_trails(force=False, offline=False):
"""
Update trails from feeds
"""
success = False
trails = {}
duplicates = {}
try:
if not os.path.isdir(USERS_DIR):
os.makedirs(USERS_DIR, 0755)
except Exception, ex:
exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
_chown(USERS_DIR)
if config.UPDATE_SERVER:
print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
content = retrieve_content(config.UPDATE_SERVER)
if not content or content.count(',') < 2:
print "[x] unable to retrieve data from '%s'" % config.UPDATE_SERVER
else:
with _fopen(TRAILS_FILE, "w+b") as f:
f.write(content)
trails = load_trails()
else:
trail_files = set()
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if config.CUSTOM_TRAILS_DIR:
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if not trails and (force or not os.path.isfile(TRAILS_FILE) or (time.time() - os.stat(TRAILS_FILE).st_mtime) >= config.UPDATE_PERIOD or os.stat(TRAILS_FILE).st_size == 0 or any(os.stat(_).st_mtime > os.stat(TRAILS_FILE).st_mtime for _ in trail_files)):
if not config.no_updates:
print "[i] updating trails (this might take a while)..."
else:
print "[i] checking trails..."
if not offline and (force or config.USE_FEED_UPDATES):
_ = os.path.abspath(os.path.join(ROOT_DIR, "trails", "feeds"))
if _ not in sys.path:
sys.path.append(_)
filenames = sorted(glob.glob(os.path.join(_, "*.py")))
else:
filenames = []
_ = os.path.abspath(os.path.join(ROOT_DIR, "trails"))
if _ not in sys.path:
sys.path.append(_)
filenames += [os.path.join(_, "static")]
filenames += [os.path.join(_, "custom")]
filenames = [_ for _ in filenames if "__init__.py" not in _]
if config.DISABLED_FEEDS:
filenames = [filename for filename in filenames if os.path.splitext(os.path.split(filename)[-1])[0] not in re.split(r"[^\w]+", config.DISABLED_FEEDS)]
for i in xrange(len(filenames)):
filename = filenames[i]
try:
module = __import__(os.path.basename(filename).split(".py")[0])
except (ImportError, SyntaxError), ex:
print "[x] something went wrong during import of feed file '%s' ('%s')" % (filename, ex)
continue
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "fetch":
print(" [o] '%s'%s" % (module.__url__, " " * 20 if len(module.__url__) < 20 else ""))
sys.stdout.write("[?] progress: %d/%d (%d%%)\r" % (i, len(filenames), i * 100 / len(filenames)))
sys.stdout.flush()
if config.DISABLED_TRAILS_INFO_REGEX and re.search(config.DISABLED_TRAILS_INFO_REGEX, getattr(module, "__info__", "")):
continue
try:
results = function()
for item in results.items():
if item[0].startswith("www.") and '/' not in item[0]:
item = [item[0][len("www."):], item[1]]
if item[0] in trails:
if item[0] not in duplicates:
duplicates[item[0]] = set((trails[item[0]][1],))
duplicates[item[0]].add(item[1][1])
if not (item[0] in trails and (any(_ in item[1][0] for _ in LOW_PRIORITY_INFO_KEYWORDS) or trails[item[0]][1] in HIGH_PRIORITY_REFERENCES)) or (item[1][1] in HIGH_PRIORITY_REFERENCES and "history" not in item[1][0]) or any(_ in item[1][0] for _ in HIGH_PRIORITY_INFO_KEYWORDS):
trails[item[0]] = item[1]
if not results and "abuse.ch" not in module.__url__:
print "[x] something went wrong during remote data retrieval ('%s')" % module.__url__
except Exception, ex:
print "[x] something went wrong during processing of feed file '%s' ('%s')" % (filename, ex)
try:
sys.modules.pop(module.__name__)
del module
except Exception:
pass
# custom trails from remote location
if config.CUSTOM_TRAILS_URL:
print(" [o] '(remote custom)'%s" % (" " * 20))
for url in re.split(r"[;,]", config.CUSTOM_TRAILS_URL):
url = url.strip()
if not url:
continue
url = ("http://%s" % url) if not "//" in url else url
content = retrieve_content(url)
if not content:
print "[x] unable to retrieve data (or empty response) from '%s'" % url
else:
__info__ = "blacklisted"
__reference__ = "(remote custom)" # urlparse.urlsplit(url).netloc
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
line = re.sub(r"\s*#.*", "", line)
if '://' in line:
line = re.search(r"://(.*)", line).group(1)
line = line.rstrip('/')
if line in trails and any(_ in trails[line][1] for _ in ("custom", "static")):
continue
if '/' in line:
trails[line] = (__info__, __reference__)
line = line.split('/')[0]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", line):
trails[line] = (__info__, __reference__)
else:
trails[line.strip('.')] = (__info__, __reference__)
for match in re.finditer(r"(\d+\.\d+\.\d+\.\d+)/(\d+)", content):
prefix, mask = match.groups()
mask = int(mask)
if mask > 32:
continue
start_int = addr_to_int(prefix) & make_mask(mask)
end_int = start_int | ((1 << 32 - mask) - 1)
if 0 <= end_int - start_int <= 1024:
address = start_int
while start_int <= address <= end_int:
trails[int_to_addr(address)] = (__info__, __reference__)
address += 1
# basic cleanup
for key in trails.keys():
if key not in trails:
continue
if config.DISABLED_TRAILS_INFO_REGEX:
if re.search(config.DISABLED_TRAILS_INFO_REGEX, trails[key][0]):
del trails[key]
continue
try:
_key = key.decode("utf8").encode("idna")
if _key != key: # for domains with non-ASCII letters (e.g. phishing)
trails[_key] = trails[key]
del trails[key]
key = _key
except:
pass
if not key or re.search(r"\A(?i)\.?[a-z]+\Z", key) and not any(_ in trails[key][1] for _ in ("custom", "static")):
del trails[key]
continue
if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key):
if any(_ in trails[key][0] for _ in ("parking site", "sinkhole")) and key in duplicates:
del duplicates[key]
if trails[key][0] == "malware":
trails[key] = ("potential malware site", trails[key][1])
if trails[key][0] == "ransomware":
trails[key] = ("ransomware (malware)", trails[key][1])
if key.startswith("www.") and '/' not in key:
_ = trails[key]
del trails[key]
key = key[len("www."):]
if key:
trails[key] = _
if '?' in key:
_ = trails[key]
del trails[key]
key = key.split('?')[0]
if key:
trails[key] = _
if '//' in key:
_ = trails[key]
del trails[key]
key = key.replace('//', '/')
trails[key] = _
if key != key.lower():
_ = trails[key]
del trails[key]
key = key.lower()
trails[key] = _
if key in duplicates:
_ = trails[key]
others = sorted(duplicates[key] - set((_[1],)))
if others and " (+" not in _[1]:
trails[key] = (_[0], "%s (+%s)" % (_[1], ','.join(others)))
read_whitelist()
for key in trails.keys():
if check_whitelisted(key) or any(key.startswith(_) for _ in BAD_TRAIL_PREFIXES):
del trails[key]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key) and (bogon_ip(key) or cdn_ip(key)):
del trails[key]
else:
try:
key.decode("utf8")
trails[key][0].decode("utf8")
trails[key][1].decode("utf8")
except UnicodeDecodeError:
del trails[key]
try:
if trails:
with _fopen(TRAILS_FILE, "w+b") as f:
writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for trail in trails:
writer.writerow((trail, trails[trail][0], trails[trail][1]))
success = True
except Exception, ex:
print "[x] something went wrong during trails file write '%s' ('%s')" % (TRAILS_FILE, ex)
print "[i] update finished%s" % (40 * " ")
if success:
print "[i] trails stored to '%s'" % TRAILS_FILE
return trails | 2ff83a3681899d6dffa8bdcedcbb7e9839bbc919 | 8,304 |
def bq_to_rows(rows):
"""Reformat BigQuery's output to regular pnguin LOD data
Reformat BigQuery's output format so we can put it into a DataFrame
Args:
rows (dict): A nested list of key-value tuples that need to be converted into a list of dicts
Returns:
list: A list of dictionaries based on the input x
"""
def _reformat(x):
pairs = x.items()
row = {}
for pair in pairs:
key, value = pair
row[key] = value
return row
return [_reformat(x) for x in rows] | 9ff842d1c41d7ebe5c822d4c07b2f26b5524b0fe | 8,305 |
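A minimal illustration using plain dicts in place of BigQuery row objects, which also expose .items(); the function copies each row into a plain dict.
rows = [{"name": "adelie", "count": 152}, {"name": "gentoo", "count": 124}]
print(bq_to_rows(rows))
# [{'name': 'adelie', 'count': 152}, {'name': 'gentoo', 'count': 124}]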
def network_config(session, args):
"""network config functions"""
cmd = pluginlib.exists(args, 'cmd')
if not isinstance(cmd, basestring):
msg = "invalid command '%s'" % str(cmd)
raise pluginlib.PluginError(msg)
return
if cmd not in ALLOWED_NETWORK_CMDS:
msg = "Dom0 execution of '%s' is not permitted" % cmd
raise pluginlib.PluginError(msg)
return
cmd_args = pluginlib.exists(args, 'args')
return ALLOWED_NETWORK_CMDS[cmd](cmd_args) | d2a551166e7d5c445f1cba6404a3e526f4e7ecdd | 8,306 |
import jellyfish
def album_id(items, sp_album):
"""Iterate through results to find correct Discogs album id."""
try:
artist = sp_album['artists'][0].lower().replace(" ", "")
except IndexError:
artist = ""
owners = -1
discogs_id = -1
similarity = 0
title = sp_album['name'].lower().replace(" ", "")
for album in items:
# title format: artist - title
index = album['title'].rfind(" - ")
disc_artist = album['title'][:index].lower().replace(" ", "")
disc_title = album['title'][index+3:].lower().replace(" ", "")
# calculate string similarity for artist spelling deviations
jw_similarity = jellyfish.jaro_winkler_similarity(artist, disc_artist)
# comparison for use of symbols in titles (& vs and)
if jellyfish.match_rating_comparison(disc_title, title):
# If they are basically the same, then match the best artist
if jellyfish.match_rating_comparison(artist, disc_artist):
if album['community']['have'] > owners:
owners = album['community']['have']
discogs_id = album['id']
similarity = jw_similarity
# If they are the same and this release is more popular
elif (jw_similarity == similarity and
album['community']['have'] > owners):
owners = album['community']['have']
discogs_id = album['id']
# If a better artist candidate is found
elif jw_similarity > similarity:
owners = album['community']['have']
discogs_id = album['id']
similarity = jw_similarity
    # we haven't found the artist if the name is not similar enough
if similarity < 0.85:
return -1
return discogs_id | 1c8f0f870c1a0c6c71de115ae6a0d15cf235af6f | 8,308 |
def css_defaults(name, css_dict):
"""Находит первое значение по-умолчанию
background -> #FFF
color -> #FFF
content -> ""
"""
cur = css_dict.get(name) or css_dict.get(name[1:-1])
if cur is None:
return None
default = cur.get('default')
if default is not None:
return default
for v in cur['values']:
if v.startswith('<') and v.endswith('>'):
ret = css_defaults(v, css_dict)
if ret is not None:
return ret | 8418af5e27dfc85a3ec70dea2e7416595ee86a1f | 8,309 |
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2] | 384ebc8fec6109de36d3c17d265b53c01a2195b6 | 8,310 |
def get_chebi_parents(chebi_ent):
"""
Get parents of ChEBI entity
    :param chebi_ent: ChEBI entity object
    :return: list of ChEBI IDs of the entity's 'is a' parents
"""
if hasattr(chebi_ent, 'OntologyParents'):
return [ent.chebiId for ent in chebi_ent.OntologyParents if
(ent.type == 'is a')]
else:
return [] | bfdf3cbfae45c07a9f5f97a85f1c64f680ac49fc | 8,311 |
def average_saccades_time(saccades_times):
"""
:param saccades_times: a list of tuples with (start_time_inclusive, end_time_exclusive)
:return: returns the average time of saccades
"""
return sum([saccade_time[1] - saccade_time[0] for saccade_time in saccades_times]) / len(saccades_times) | a22a5d89ddd4317fa10ed6f5d920f17560028514 | 8,312 |
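For example, two saccades lasting 30 and 50 time units average to 40:
print(average_saccades_time([(100, 130), (200, 250)]))   # 40.0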
from typing import Optional
from typing import List
from typing import Tuple
import logging
def solve_tsp_local_search(
distance_matrix: np.ndarray,
x0: Optional[List[int]] = None,
perturbation_scheme: str = "two_opt",
max_processing_time: Optional[float] = None,
log_file: Optional[str] = None,
) -> Tuple[List, float]:
"""Solve a TSP problem with a local search heuristic
Parameters
----------
distance_matrix
Distance matrix of shape (n x n) with the (i, j) entry indicating the
distance from node i to j
x0
Initial permutation. If not provided, it starts with a random path
perturbation_scheme {"ps1", "ps2", "ps3", "ps4", "ps5", "ps6", ["two_opt"]}
Mechanism used to generate new solutions. Defaults to "two_opt"
max_processing_time {None}
Maximum processing time in seconds. If not provided, the method stops
only when a local minimum is obtained
log_file
If not `None`, creates a log file with details about the whole
execution
Returns
-------
A permutation of nodes from 0 to n - 1 that produces the least total
distance obtained (not necessarily optimal).
The total distance the returned permutation produces.
Notes
-----
Here are the steps of the algorithm:
1. Let `x`, `fx` be a initial solution permutation and its objective
value;
2. Perform a neighborhood search in `x`:
2.1 For each `x'` neighbor of `x`, if `fx'` < `fx`, set `x` <- `x'`
and stop;
3. Repeat step 2 until all neighbors of `x` are tried and there is no
improvement. Return `x`, `fx` as solution.
"""
x, fx = setup(distance_matrix, x0)
max_processing_time = max_processing_time or np.inf
if log_file:
fh = logging.FileHandler(log_file)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
tic = default_timer()
stop_early = False
improvement = True
while improvement and (not stop_early):
improvement = False
for n_index, xn in enumerate(neighborhood_gen[perturbation_scheme](x)):
if default_timer() - tic > max_processing_time:
logger.warning("Stopping early due to time constraints")
stop_early = True
break
fn = compute_permutation_distance(distance_matrix, xn)
logger.info(f"Current value: {fx}; Neighbor: {n_index}")
if fn < fx:
improvement = True
x, fx = xn, fn
break # early stop due to first improvement local search
return x, fx | f1b77b7fb3d1b83d18a7f2ba99d4a266e98f8462 | 8,313 |
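A hedged usage sketch; it assumes the module-level helpers referenced above (setup, neighborhood_gen, compute_permutation_distance, logger) are available alongside the function, as in a python-tsp style module.
import numpy as np
distance_matrix = np.array([
    [0, 5, 4, 10],
    [5, 0, 8, 5],
    [4, 8, 0, 3],
    [10, 5, 3, 0],
])
permutation, distance = solve_tsp_local_search(distance_matrix)
print(permutation, distance)   # e.g. [0, 1, 3, 2] with a total closed-tour distance of 17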
def split(self, split_size_or_sections, dim=0, copy=True):
"""Return the split chunks along the given dimension.
Parameters
----------
split_size_or_sections : Union[int, Sequence[int]
The number or size of chunks.
dim : int, optional, default=0
The dimension to split.
copy : bool, optional, default=True
Copy or create the views of input.
Returns
-------
Sequence[dragon.vm.torch.Tensor]
The output tensors.
See Also
--------
`torch.split(...)`_
"""
return array_ops.split(self, split_size_or_sections, dim, copy) | cd6725af62fc0f5cde758e23add206a2ddb7c0af | 8,314 |
def HighFlowSingleInletTwoCompartmentGadoxetateModel(xData2DArray, Ve: float,
Kbh: float, Khe: float,
dummyVariable):
"""This function contains the algorithm for calculating how concentration varies with time
    using the High Flow Single Inlet Two Compartment Gadoxetate Model.
Input Parameters
----------------
xData2DArray - time and AIF concentration 1D arrays stacked into one 2D array.
Ve - Plasma Volume Fraction (decimal fraction)
    Kbh - Biliary Efflux Rate (mL/min/mL)
    Khe - Hepatocyte Uptake Rate (mL/min/mL)
Returns
-------
modelConcs - list of calculated concentrations at each of the
time points in array 'time'.
"""
try:
# Logging and exception handling function.
exceptionHandler.modelFunctionInfoLogger()
# In order to use lmfit curve fitting, time and concentration must be
# combined into one function input parameter, a 2D array, then separated into individual
# 1 D arrays
times = xData2DArray[:,0]
AIFconcentrations = xData2DArray[:,1]
Th = (1-Ve)/Kbh
modelConcs = []
modelConcs = (Ve*AIFconcentrations + Khe*Th*tools.expconv(Th, times, AIFconcentrations, 'HighFlowSingleInletTwoCompartmentGadoxetateModel'))
return(modelConcs)
# Exception handling and logging code.
except ZeroDivisionError as zde:
exceptionHandler.handleDivByZeroException(zde)
except Exception as e:
exceptionHandler.handleGeneralException(e) | 18684058926b9362b7a6b495cf1f48fd8c3188e4 | 8,315 |
import struct
import numpy
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
"""Read CZ_LSMINFO tag from file and return as dict."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError('invalid CZ_LSMINFO structure')
fh.seek(-8, 1)
if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
# adjust structure according to structure_size
lsminfo = []
size = 0
for name, dtype in TIFF.CZ_LSMINFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
lsminfo.append((name, dtype))
else:
lsminfo = TIFF.CZ_LSMINFO
lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
lsminfo = recarray2dict(lsminfo)
# read LSM info subrecords at offsets
for name, reader in TIFF.CZ_LSMINFO_READERS.items():
if reader is None:
continue
offset = lsminfo.get('Offset' + name, 0)
if offset < 8:
continue
fh.seek(offset)
try:
lsminfo[name] = reader(fh)
except ValueError:
pass
return lsminfo | 1bcf4d22315503e2f21fcbacd6a0797fc2fd16a7 | 8,316 |
def mi_alignment(
alignment,
mi_calculator=mi,
null_value=DEFAULT_NULL_VALUE,
excludes=DEFAULT_EXCLUDES,
exclude_handler=None,
):
"""Calc mi over all position pairs in an alignment
alignment: the full alignment object
mi_calculator: a function which calculated MI from two entropies and
their joint entropy -- see mi and normalized_mi for examples
null_value: the value to be returned if mi cannot be calculated (e.g.,
if mi_calculator == normalized_mi and joint_h = 0.0)
excludes: iterable objects containing characters that require special
handling -- by default, if a position contains an exclude, null_value
will be returned. For non-default handling, pass an exclude_handler
exclude_handler: a function which takes a position and returns it
with exclude characters processed in someway.
"""
aln_length = len(alignment)
# Create result matrix
result = zeros((aln_length, aln_length), float)
# Compile postional entropies for each position in the alignment
# I believe I started using this rather than alignment.uncertainties
# b/c the latter relies on converting a ArrayAlignment to an Alignment --
# need to check into this.
positional_entropies = alignment.entropy_per_pos()
# Calculate pairwise MI between position_number and all alignment
# positions, and return the results in a vector.
for i in range(aln_length):
for j in range(i + 1):
result[i, j] = mi_pair(
alignment,
pos1=i,
pos2=j,
h1=positional_entropies[i],
h2=positional_entropies[j],
mi_calculator=mi_calculator,
null_value=null_value,
excludes=excludes,
exclude_handler=exclude_handler,
)
# copy the lower triangle to the upper triangle to make
# the matrix symmetric
ltm_to_symmetric(result)
return result | f576b8c4df018bba787c7c46091e52b70badd9de | 8,317 |
def Jaccard3d(a, b):
"""
This will compute the Jaccard Similarity coefficient for two 3-dimensional volumes
Volumes are expected to be of the same size. We are expecting binary masks -
0's are treated as background and anything else is counted as data
Arguments:
a {Numpy array} -- 3D array with first volume
b {Numpy array} -- 3D array with second volume
Returns:
float
"""
if len(a.shape) != 3 or len(b.shape) != 3:
raise Exception(f"Expecting 3 dimensional inputs, got {a.shape} and {b.shape}")
if a.shape != b.shape:
raise Exception(f"Expecting inputs of the same shape, got {a.shape} and {b.shape}")
# TASK: Write implementation of Jaccard similarity coefficient. Please do not use
# the Dice3D function from above to do the computation ;)
# <YOUR CODE GOES HERE>
overlap = 0
for i in range(a.shape[0]):
for j in range(a.shape[1]):
            for k in range(a.shape[2]):
if (a[i, j, k] != 0 and b[i, j, k] != 0):
overlap += 1
all_together = 0
for i in range(a.shape[0]):
for j in range(a.shape[1]):
            for k in range(a.shape[2]):
if (a[i, j, k] != 0 or b[i, j, k] != 0):
all_together += 1
return overlap/all_together | a4452e523e484db50b99d36f9ee67c3508678ea6 | 8,318 |
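The triple loop above can also be written as a vectorized computation; a sketch (assuming numpy) that treats any non-zero voxel as foreground:
import numpy as np
def jaccard3d_vectorized(a, b):
    """Vectorized Jaccard similarity for two 3D masks (non-zero = foreground)."""
    a_fg = a != 0
    b_fg = b != 0
    intersection = np.logical_and(a_fg, b_fg).sum()
    union = np.logical_or(a_fg, b_fg).sum()
    return intersection / union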
def get_pod_obj(name, namespace=None):
"""
Returns the pod obj for the given pod
Args:
        name (str): Name of the resource
Returns:
obj : A pod object
"""
ocp_obj = OCP(api_version='v1', kind=constants.POD, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
pod_obj = Pod(**ocp_dict)
return pod_obj | 464aa15574ee65672f7963e6e5426753ff98ee72 | 8,319 |
def median_rank(PESSI_SORT, OPTI_SORT, A):
"""
Calculates the median rank of each action.
:param PESSI_SORT: Dictionary containing the actions classified according to the pessimistic procedure.
:param OPTI_SORT: Dictionary containing the actions classified according to the optimistic procedure.
:param A: List containing the names of the actions as strings.
:return med_rank: Dictionary containing the median rank of each action. The keys are the names of the actions
and the values are the median ranks.
"""
med_rank = {}
for a in A:
med_rank[a] = (OPTI_SORT[1][a] + PESSI_SORT[1][a]) / 2
return med_rank | 7f760847ae2a69edf07a593a6ebfb84dce4c4103 | 8,322 |
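For instance, if the optimistic and pessimistic procedures rank action 'a1' 2nd and 4th, its median rank is 3.0. The container layout below (a name-to-rank dict at index 1) is an assumption that matches how the function indexes its arguments:
OPTI_SORT = (None, {"a1": 2, "a2": 1})    # hypothetical structure
PESSI_SORT = (None, {"a1": 4, "a2": 1})
print(median_rank(PESSI_SORT, OPTI_SORT, ["a1", "a2"]))   # {'a1': 3.0, 'a2': 1.0}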
from typing import Optional
import json
def get_token(
event: ApiGatewayEvent,
_context: LambdaContext,
node_api: Optional[NodeApi] = None
) -> dict:
"""Get token details given a token uid.
*IMPORTANT: Any changes on the parameters should be reflected on the `cacheKeyParameters` for this method.
"""
node_api = node_api or NodeApi()
id = event.query.get("id")
if id is None:
raise ApiError("invalid_parameters")
response = node_api.get_token(id)
return {
"statusCode": 200,
"body": json.dumps(response or UNKNOWN_ERROR_MSG),
"headers": {
"Content-Type": "application/json"
}
} | 7be9e80aef60ad7f5befa3f21b597541eb79c4d0 | 8,323 |
import re
def update_dictionary_entries(old_entries, need_to_add):
"""
Expects dictionary of species entries and
unique list of species (as SMILES) that need to be added
Creates new entries for the species that need to be added
Returns old and new entries
"""
    need_to_add = list(set(need_to_add))
for j, species in enumerate(need_to_add):
molecule = rmgpy.molecule.Molecule(smiles=species)
adjlist = molecule.to_adjacency_list()
multiplicity = None
if re.search('(?<=multiplicity ).*', adjlist):
multiplicity = int(
re.search('(?<=multiplicity ).*', adjlist).group(0))
adjlist = re.sub(r'multiplicity .*',
f'multiplicity [{multiplicity}]', adjlist)
group = rmgpy.molecule.group.Group()
group.from_adjacency_list(adjlist)
atom_counts = {}
rel_label = ''
for atom in ['C', 'H', 'O']:
count = species.count(atom)
if count > 0:
rel_label = rel_label + atom + str(count)
assert rel_label != ''
"""
        3 Scenarios:
No old -> no need for ID number: max_ID = -1
Only one old -> needs to have ID of 1: max_ID = 0
Multiple old -> needs to have a unique ID: max_ID > 0
"""
new_ID = None
max_ID = -1
duplicate = False
for old_label in old_entries:
old_entry = old_entries[old_label]
if group.is_isomorphic(old_entry.item):
duplicate = True
print(f'{old_entry} found to be duplicate')
continue
if rel_label not in old_label:
continue
if rel_label == old_label and max_ID == -1:
                # At least one with the same label
max_ID = 0
if old_label.find('-') > 0:
old_label, ID_str = old_label.split('-')
ID = int(ID_str)
if old_label == rel_label and ID > max_ID:
                    # Multiple existing labels
max_ID = ID
if max_ID > -1:
# Existing label
new_ID = max_ID + 1
rel_label = rel_label + '-' + str(new_ID)
if not duplicate:
entry = rmgpy.data.base.Entry()
entry.label = rel_label
entry.item = group
assert rel_label not in list(old_entries.keys())
old_entries[rel_label] = entry
entry_labels = [old_entries[key].label for key in old_entries]
assert len(entry_labels) == len(list(set(entry_labels))
), 'Non-unique labels in dictionary'
return old_entries | 9182c42349b76a7e72c3c1c134cb347ed0bd2a2d | 8,324 |
def four_rooms(dims, doorway=1.):
"""
Args:
dims: [dimx, dimy] dimensions of rectangle
doorway: size of doorway
Returns:
adjmat: adjacency matrix
xy: xy coordinates of each state for plotting
labels: empty []
"""
half_x, half_y = (dims[0]*.5, dims[1]*.5)
quarter_x, quarter_y = (dims[0]*.25, dims[1]*.25)
threequarter_x, threequarter_y = (dims[0]*.75, dims[1]*.75)
adj, xy, _ = rectangle_mesh(dims)
room = np.array([xy[:,0] < half_x, xy[:,1] < half_y], dtype=np.float32).T
mask = np.array(distance.squareform(distance.pdist(room, "euclidean")) == 0, dtype=np.float32)
labels = np.sum(room * np.array([[1, 2]]), 1)
doorsx = [quarter_x, threequarter_x, half_x, half_x]
doorsy = [half_y, half_y, quarter_y, threequarter_y]
doors = np.array([doorsx, doorsy]).T
inds = []
for d in doors:
dist_to_door = np.sum(np.abs(xy - d[None, :]), 1)
ind = np.where(dist_to_door == np.min(dist_to_door))[0]
if len(ind) > 1: ind = ind[0]
mask[ind, :] = 1
mask[:, ind] = 1
adj = adj * mask
return adj, xy, labels | 0744ab4b38ab0b5d0b96c53d45c88dc1e37f932e | 8,326 |
def get_verse_url(verse: str) -> str:
"""Creates a URL for the verse text."""
node = CONNECTIONS[verse]
volume = scripture_graph.VOLUMES_SHORT[node['volume']].lower()
if volume == 'bom':
volume = 'bofm'
elif volume == 'd&c':
volume = 'dc-testament'
elif volume == 'pogp':
volume = 'pgp'
book = node['book'].lower()
book_replacements = {
' ': '-',
'.': '',
'&': '',
'—': '-',
}
for old, new in book_replacements.items():
book = book.replace(old, new)
if book == 'd&c':
book = 'dc'
chapter = node['chapter']
i = node['verse']
return parse.urljoin(URL_BASE,
f'{volume}/{book}/{chapter}.{i}?lang=eng#p{i}#{i}') | 37ce47aa6e18e3f550e9adacb3bf16affb6154f8 | 8,327 |
from typing import cast
from typing import List
def get_ws_dependency_annotation(state: GlobalState) -> WSDependencyAnnotation:
""" Returns the world state annotation
:param state: A global state object
"""
annotations = cast(
List[WSDependencyAnnotation],
list(state.world_state.get_annotations(WSDependencyAnnotation)),
)
if len(annotations) == 0:
annotation = WSDependencyAnnotation()
state.world_state.annotate(annotation)
else:
annotation = annotations[0]
return annotation | ba44455594c4a1f63dac5adec95b2efe6a4b2af6 | 8,328 |
def get_gin_confg_strs():
"""
Obtain both the operative and inoperative config strs from gin.
The operative configuration consists of all parameter values used by
configurable functions that are actually called during execution of the
current program, and inoperative configuration consists of all parameter
configured but not used by configurable functions. See
``gin.operative_config_str()`` and ``gin_utils.inoperative_config_str`` for
more detail on how the config is generated.
Returns:
tuple:
- md_operative_config_str (str): a markdown-formatted operative str
- md_inoperative_config_str (str): a markdown-formatted inoperative str
"""
operative_config_str = gin.operative_config_str()
md_operative_config_str = _markdownify_gin_config_str(
operative_config_str,
'All parameter values used by configurable functions that are actually called'
)
md_inoperative_config_str = gin_utils.inoperative_config_str()
if md_inoperative_config_str:
md_inoperative_config_str = _markdownify_gin_config_str(
md_inoperative_config_str,
"All parameter values configured but not used by program. The configured "
"functions are either not called or called with explicit parameter values "
"overriding the config.")
return md_operative_config_str, md_inoperative_config_str | 9f9081aafa6a4a43be37edd4002ee17ac518f5d4 | 8,329 |
def L(x, c, gamma):
"""Return c-centered Lorentzian line shape at x with HWHM gamma"""
return gamma / (np.pi * ((x - c) ** 2 + gamma ** 2)) | 853ba2c978a50f9f43915342caebed2e3d5ead8d | 8,330 |
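A quick numerical check (a sketch, assuming numpy): the line shape should integrate to roughly 1 and peak at the centre c.
import numpy as np
x = np.linspace(-100.0, 100.0, 200001)
y = L(x, c=0.0, gamma=2.0)
print((y * (x[1] - x[0])).sum())   # ~0.99, approaches 1 as the integration range widens
print(x[np.argmax(y)])             # ~0.0, the peak sits at the centre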
import socket
import logging
def request_data_from_weather_station():
"""
Send a command to the weather station to get current values.
Returns
-------
bytes
received data, 0 if error occurred
"""
sock = socket.create_connection((WEATHER_HOST, WEATHER_PORT), GRAPHITE_TIMEOUT)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
data = 0
try:
sock.send(CMD_ACT)
data = sock.recv(1024)
except:
logging.error('Error getting data from weather station!')
finally:
sock.close()
if check_crc(data):
return data
logging.error('CRC failed! \r\n Data: %s', data)
return 0 | 0bf28c3cf5db1f14446aa9866b9854b435378439 | 8,331 |
def solution2(arr):
"""improved solution1 #TLE """
if len(arr) == 1:
return arr[0]
max_sum = float('-inf')
l = len(arr)
for i in range(l):
local_sum = arr[i]
local_min = arr[i]
max_sum = max(max_sum, local_sum)
for j in range(i + 1, l):
local_sum += arr[j]
local_min = min(local_min, arr[j])
max_sum = max([max_sum, local_sum, local_sum - local_min])
return max_sum | 835240bb4f70e5b6425a6ac0d2a4210e2c8a0ad0 | 8,332 |
def hillas_parameters_4(pix_x, pix_y, image, recalculate_pixels=True):
"""Compute Hillas parameters for a given shower image.
As for hillas_parameters_3 (old Whipple Fortran code), but more Pythonized
MP: Parameters calculated as Whipple Reynolds et al 1993 paper:
http://adsabs.harvard.edu/abs/1993ApJ...404..206R
which should be the same as one of my ICRC 1991 papers and my thesis.
Parameters
----------
pix_x : array_like
Pixel x-coordinate
pix_y : array_like
Pixel y-coordinate
image : array_like
Pixel values corresponding
recalculate_pixels : Boolean (default True)
Recalculate the pixel higher multiples (e.g., if pixels move
(!) or pixel list changes between calls)
Returns
-------
hillas_parameters : `MomentParameters`
"""
if type(pix_x) == Quantity:
unit = pix_x.unit
assert pix_x.unit == pix_y.unit
else:
unit = 1.0
    ''' MP: Actually, I don't know why we need to strip the units...
    shouldn't the calculations all work with them?'''
pix_x = Quantity(np.asanyarray(pix_x, dtype=np.float64)).value
pix_y = Quantity(np.asanyarray(pix_y, dtype=np.float64)).value
image = np.asanyarray(image, dtype=np.float64)
assert pix_x.shape == image.shape
assert pix_y.shape == image.shape
(sumsig, sumxsig, sumysig, sumx2sig, sumy2sig, sumxysig, sumx3sig,
sumx2ysig, sumxy2sig, sumy3sig) = np.zeros(10)
# Call static_xy to initialize the "static variables"
# Actually, would be nice to just call this if we
# know the pixel positions have changed
static_xy(pix_x, pix_y, recalculate_pixels)
sumsig = image.sum()
sumxsig = (image * pix_x).sum()
sumysig = (image * pix_y).sum()
sumx2sig = (image * static_xy.pix_x2).sum()
sumy2sig = (image * static_xy.pix_y2).sum()
sumxysig = (image * static_xy.pix_xy).sum()
sumx3sig = (image * static_xy.pix_x3).sum()
sumx2ysig = (image * static_xy.pix_x2y).sum()
sumxy2sig = (image * static_xy.pix_xy2).sum()
sumy3sig = (image * static_xy.pix_y3).sum()
sumx4sig = (image * static_xy.pix_x4).sum()
sumx3ysig = (image * static_xy.pix_x3y).sum()
sumx2y2sig = (image * static_xy.pix_x2y2).sum()
sumxy3sig = (image * static_xy.pix_xy3).sum()
sumy4sig = (image * static_xy.pix_y4).sum()
if sumsig == 0.0:
raise (HillasParameterizationError(("Empty pixels!"
"Cannot calculate image parameters."
"Exiting...")))
xm = sumxsig / sumsig
ym = sumysig / sumsig
x2m = sumx2sig / sumsig
y2m = sumy2sig / sumsig
xym = sumxysig / sumsig
x3m = sumx3sig / sumsig
x2ym = sumx2ysig / sumsig
xy2m = sumxy2sig / sumsig
y3m = sumy3sig / sumsig
x4m = sumx4sig / sumsig
x3ym = sumx3ysig / sumsig
x2y2m = sumx2y2sig / sumsig
xy3m = sumxy3sig / sumsig
y4m = sumy4sig / sumsig
'''Doing this should be same as above, but its 4us slower !?
(xm, ym, x2m, y2m, xym, x3m, x2ym, xy2m, y3m) = \
(sumxsig, sumysig, sumx2sig, sumy2sig, sumxysig, sumx3sig,
sumx2ysig, sumxy2sig, sumy3sig) / sumsig'''
xm2 = xm * xm
ym2 = ym * ym
xmym = xm * ym
vx2 = x2m - xm2
vy2 = y2m - ym2
vxy = xym - xmym
vx3 = x3m - 3.0 * xm * x2m + 2.0 * xm2 * xm
vx2y = x2ym - x2m * ym - 2.0 * xym * xm + 2.0 * xm2 * ym
vxy2 = xy2m - y2m * xm - 2.0 * xym * ym + 2.0 * xm * ym2
vy3 = y3m - 3.0 * ym * y2m + 2.0 * ym2 * ym
d = vy2 - vx2
dist = np.sqrt(xm2 + ym2) # could use hypot(xm,ym), but already have squares
phi = np.arctan2(ym, xm)
# -- simpler formulae for length & width suggested CA 901019
z = np.hypot(d, 2.0 * vxy)
length = np.sqrt((vx2 + vy2 + z) / 2.0)
width = np.sqrt((vy2 + vx2 - z) / 2.0)
# -- simpler formula for miss introduced CA, 901101
# -- revised MP 910112
if z == 0.0:
miss = dist
else:
uu = 1 + d / z
vv = 2 - uu
miss = np.sqrt((uu * xm2 + vv * ym2) / 2.0 - xmym * (2.0 * vxy / z))
    '''Change to faster calculation of psi and avoid inaccuracy for hyp
psi = np.arctan2((d + z) * ym + 2.0 * vxy * xm, 2.0 *vxy * ym - (d - z) * xm)
hyp = np.sqrt(2 * z * (z + d)) #! should be simplification of sqrt((d+z)
**2+(2*vxy)**2 ... but not accurate!
hyp = np.hypot(d + z,2 * vxy)
psi = np.arctan2(d + z, 2 * vxy)
cpsi = np.cos(psi)
spsi = np.sin(psi)'''
tanpsi_numer = (d + z) * ym + 2.0 * vxy * xm
tanpsi_denom = 2.0 * vxy * ym - (d - z) * xm
psi = np.arctan2(tanpsi_numer, tanpsi_denom)
# Code to de-interface with historical code
size = sumsig
m_x = xm
m_y = ym
length = length
r = dist
# Note, "skewness" is the same as the Whipple/MP "asymmetry^3", which is fine.
# ... and also, Whipple/MP "asymmetry" * "length" = MAGIC "asymmetry"
# ... so, MAGIC "asymmetry" = MAGIC "skewness"^(1/3) * "length"
# I don't know what MAGIC's "asymmetry" is supposed to be.
# -- Asymmetry and other higher moments
if length != 0.0:
vx4 = x4m - 4.0 * xm * x3m + 6.0 * xm2 * x2m - 3.0 * xm2 * xm2
vx3y = x3ym - 3.0 * xm * x2ym + 3.0 * xm2 * xym - x3m * ym \
+ 3.0 * x2m * xmym - 3.0 * xm2 * xm * ym
vx2y2 = x2y2m - 2.0 * ym * x2ym + x2m * ym2 \
- 2.0 * xm * xy2m + 4.0 * xym * xmym + xm2 * y2m - 3.0 * xm2 * ym2
vxy3 = xy3m - 3.0 * ym * xy2m + 3.0 * ym2 * xym - y3m * xm \
+ 3.0 * y2m * xmym - 3.0 * ym2 * ym * xm
vy4 = y4m - 4.0 * ym * y3m + 6.0 * ym2 * y2m - 3.0 * ym2 * ym2
hyp = np.hypot(tanpsi_numer, tanpsi_denom)
if hyp != 0.:
cpsi = tanpsi_denom / hyp
spsi = tanpsi_numer / hyp
else:
cpsi = 1.
spsi = 0.
cpsi2 = cpsi * cpsi
spsi2 = spsi * spsi
cspsi = cpsi * spsi
sk3bylen3 = (vx3 * cpsi * cpsi2 +
3.0 * vx2y * cpsi2 * spsi +
3.0 * vxy2 * cpsi * spsi2 +
vy3 * spsi * spsi2)
asym = np.copysign(np.power(np.abs(sk3bylen3), 1. / 3.), sk3bylen3) / length
skewness = asym * asym * asym # for MP's asym... (not for MAGIC asym!)
# Kurtosis
kurt = (vx4 * cpsi2 * cpsi2 +
4.0 * vx3y * cpsi2 * cspsi +
6.0 * vx2y2 * cpsi2 * spsi2 +
4.0 * vxy3 * cspsi * spsi2 +
vy4 * spsi2 * spsi2)
kurtosis = kurt / (length * length * length * length)
else: # Skip Higher Moments
asym = 0.0
psi = 0.0
skewness = 0.0
kurtosis = 0.0
# Azwidth not used anymore
# # -- Akerlof azwidth now used, 910112
# d = y2m - x2m
# z = np.sqrt(d * d + 4 * xym * xym)
# azwidth = np.sqrt((x2m + y2m - z) / 2.0)
return MomentParameters(size=size, cen_x=m_x * unit, cen_y=m_y * unit,
length=length * unit, width=width * unit, r=r * unit,
phi=Angle(phi * u.rad),
psi=Angle(psi * u.rad),
miss=miss * unit,
skewness=skewness, kurtosis=kurtosis) | 87fa302b6e6b1b81b66d8e8fb7cc4e34da1583d9 | 8,333 |
from typing import List
from typing import Optional
def create_intrusion_set(
name: str,
aliases: List[str],
author: Identity,
primary_motivation: Optional[str],
secondary_motivations: List[str],
external_references: List[ExternalReference],
object_marking_refs: List[MarkingDefinition],
) -> IntrusionSet:
"""Create an intrusion set."""
return IntrusionSet(
created_by_ref=author,
name=name,
aliases=aliases,
primary_motivation=primary_motivation,
secondary_motivations=secondary_motivations,
labels=["intrusion-set"],
external_references=external_references,
object_marking_refs=object_marking_refs,
) | be8df574ac1be08c724620cf20495922cff5918e | 8,334 |
from ..core import Tensor
def broadcast_to(tensor, shape):
"""Broadcast an tensor to a new shape.
Parameters
----------
tensor : array_like
The tensor to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : Tensor
Raises
------
ValueError
If the tensor is not compatible with the new shape according to Mars's
broadcasting rules.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([1, 2, 3])
>>> mt.broadcast_to(x, (3, 3)).execute()
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
tensor = tensor if isinstance(tensor, Tensor) else astensor(tensor)
shape = tuple(shape) if isinstance(shape, (list, tuple)) else (shape,)
if any(np.isnan(s) for s in tensor.shape):
raise ValueError('input tensor has unknown shape, '
'need to call `.execute()` first')
if tensor.shape == shape:
return tensor
new_ndim = len(shape) - tensor.ndim
if new_ndim < 0:
raise ValueError('input operand has more dimensions than allowed by the axis remapping')
if any(o != n for o, n in zip(tensor.shape, shape[new_ndim:]) if o != 1):
raise ValueError('operands could not be broadcast together '
'with remapped shapes [original->remapped]: {0} '
'and requested shape {1}'.format(tensor.shape, shape))
op = TensorBroadcastTo(shape, dtype=tensor.dtype, sparse=tensor.issparse())
return op(tensor, shape) | 3c738227f98d4ca8a6b1c0cc98cea769b697a987 | 8,335 |
from functools import wraps
from flask import abort, redirect, url_for
from flask_login import current_user
def admin_required(handler_method):
"""Require that a user be an admin.
To use it, decorate your method like this::
@admin_required
def get(self):
...
"""
@wraps(handler_method)
def check_admin(*args, **kwargs):
"""Perform the check."""
if current_user.is_anonymous:
return redirect(url_for('home.login'))
if current_user.is_admin:
return handler_method(*args, **kwargs)
abort(401)
return check_admin | 03cc9e9cd32ab0b239f45c70fffcd85108b0173c | 8,336 |
import requests
def get(path):
"""Get."""
verify()
resp = requests.get(f"{URL}{path}", headers=auth)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
error_msg(str(e))
return
return resp.json() | c1661ac0e07ff467f15a429fe6fbf09d53a34ef9 | 8,337 |
import pandas as pd
def combine_dataframes(dfs: [pd.DataFrame]) -> pd.DataFrame:
"""
Receives a list of DataFrames and concatenates them. They must all have the same header.
:param dfs: List of DataFrames
:return: Single concatenated DataFrame
"""
df = pd.concat(dfs, sort=False)
return df | b7c1cd94870638a3a975ea7c4f9c284cbd4ee0a9 | 8,338 |
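# Usage sketch for combine_dataframes() above (hypothetical frames sharing a header):
frames = [pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [3]})]
combined = combine_dataframes(frames)  # 3 rows, single column "a"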
import numpy
def _approx_sp(salt,pres):
"""Approximate TDl at SP.
Approximate the temperature and liquid water density of sea-ice with
the given salinity and pressure.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:returns: Temperature and liquid water density (both in SI units).
"""
CDIF = _CLIQ-_CICE
R0 = _LILTP/_TTP / CDIF
r1 = (pres-_PTPE) * (_VITP-_VLTP)/_TTP / CDIF
r2 = _RSAL*salt / CDIF
w = -(1 - R0 + r1) * numpy.exp(-(1 - R0 - r2))
negz = 1 - (1 + _E*w)**_C_SP
temp = (1 - R0 + r1)*_TTP/negz
dliq = _dliq_default(temp,pres)
return temp, dliq | b45727ad6f08cead1eb32477c8691233c38e9387 | 8,342 |
def _get_connection_params(resource):
"""Extract connection and params from `resource`."""
args = resource.split(";")
if len(args) > 1:
return args[0], args[1:]
else:
return args[0], [] | 87cdb607027774d58d1c3bf97ac164c48c32395c | 8,343 |
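# Example for _get_connection_params() above: the part before the first ";" is the
# connection string, everything after it becomes the parameter list.
conn, params = _get_connection_params("sqlite:///db.sqlite;timeout=5;readonly")
# conn == "sqlite:///db.sqlite", params == ["timeout=5", "readonly"]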
import operator
from pprint import pformat
from typing import Any, Callable, Dict, List, Union, cast
import requests
# Cell, flatten and take are helpers from the surrounding test utilities (not shown here).
def listdata(
resp: Union[requests.Response, List[Dict[str, Any]]],
*keys: Union[str, Callable[[], bool]],
sort: Union[bool, str] = True,
full: bool = False, # returns dicts instead of tuples
) -> List[tuple]:
"""Return data from a given requests.Response object.
Only non reserved fields are returned.
By default data are converted to List[Tuple[Any]], but if `full` is True,
then List[Dict[str, Any] is returned.
Usage:
>>> data = [
... {'_id': 1, 'odd': 1, 'even': 2},
... {'_id': 2, 'odd': 3, 'even': 4},
... {'_id': 3, 'odd': 5, 'even': 6},
... }
>>> listdata(data)
[
(1, 2),
(3, 4),
(5, 6),
]
>>> listdata(data, 'even')
[2, 4, 6]
>>> listdata(data, 'odd', 'even')
[
(1, 2),
(3, 4),
(5, 6),
]
>>> listdata(data, full=True)
data = [
{'odd': 1, 'even': 2},
{'odd': 3, 'even': 4},
{'odd': 5, 'even': 6},
}
"""
# Prepare data
if isinstance(resp, list):
data = resp
elif resp.headers['content-type'].startswith('text/html'):
data = resp.context
assert resp.status_code == 200, pformat(data)
assert 'data' in data, pformat(data)
assert 'header' in data, pformat(data)
header = data['header']
keys = keys or [k for k in header if not k.startswith('_')]
data = [
{k: v.value for k, v in zip(header, row)}
for row in cast(List[List[Cell]], data['data'])
]
else:
data = resp.json()
assert resp.status_code == 200, pformat(data)
assert '_data' in data, pformat(data)
data = data['_data']
keys = keys or sorted({
k
for d in flatten(data)
for k in d
if not k.startswith('_')
})
# Clean data
if full:
data = [take(keys, row) for row in data]
elif len(keys) == 1:
k = keys[0]
data = [take(k, row) for row in data]
else:
data = [tuple(take(k, row) for k in keys) for row in data]
# Sort
if sort is True:
data = sorted(data, key=str)
elif sort:
if full:
sort_key = operator.itemgetter(sort)
else:
sort_key = operator.itemgetter(keys.index(sort))
data = sorted(data, key=sort_key)
return data | 878df7c56f97a3fe2c92499955bb760888673bbc | 8,345 |
def get_current_pkg():
"""
Returns:
        Package name (str): always returned in uppercase.
"""
return eval_foreign_vm_copy("(send *package* :name)") | 16c768dace7a4e88f7d7eb21aab58ce917f2ce43 | 8,347 |
def _normalise_trigger(value: float) -> float:
"""
Helper function used to normalise the controller trigger values into a common range.
:param value: Value to be normalised
:raises: ValueError
:return: Normalised value
"""
return _normalise(value, _HARDWARE_TRIGGER_MIN, _HARDWARE_TRIGGER_MAX, _INTENDED_TRIGGER_MIN, _INTENDED_TRIGGER_MAX) | d5653da9f625896865a3fd9601d3f2707cba6e8c | 8,348 |
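# The linear rescaling helper used by _normalise_trigger() above is not part of this
# snippet; a minimal sketch of what such a _normalise could look like (an assumption,
# not the project's actual implementation):
def _normalise(value, old_min, old_max, new_min, new_max):
    """Map value from [old_min, old_max] onto [new_min, new_max] linearly."""
    return (value - old_min) * (new_max - new_min) / (old_max - old_min) + new_min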
def full_process(s):
"""Process string by
-- removing all but letters and numbers
-- trim whitespace
-- force to lower case"""
if s is None:
return ""
    #Here we will force a return of "" if it is None, empty, or not valid
#Merged from validate_string
try:
s = unicode(s)
len(s) > 0
except TypeError:
return ""
# Keep only Letters and Numbers (see Unicode docs).
string_out = StringProcessor.replace_with_whitespace(s)
# Force into lowercase.
string_out = StringProcessor.to_lower_case(string_out)
# Remove leading and trailing whitespaces.
string_out = StringProcessor.strip(string_out)
return string_out | 071ba938708f170b914576895f7cab1aa8cb1cc3 | 8,349 |
import math
def ldexp(space, x, i):
"""ldexp(x, i) -> x * (2**i)
"""
return math2(space, math.ldexp, x, i) | ef083d77ff36acbc7d7dfede0772e9e8bf34b17a | 8,350 |
def menu():
"""
Print a menu with all the functionalities.
Returns:
The choice of the user.
"""
print "=" * 33 + "\nMENU\n" + "=" * 33
descriptions = ["Load host from external file",
"Add a new host",
"Print selected hosts",
"Check active hosts",
"Select only active hosts",
"Select bots",
"Execute command locally",
"Execute command on bots",
"Run external script",
"Open shell in a host",
"Exit"]
for num, func in enumerate(descriptions):
print "[" + str(num) + "] " + func
choice = raw_input(">>> ")
return choice | 29bdce7c50cea7d9bbc5a27b71c803db91fe4eef | 8,351 |
from typing import List, Optional
from fastapi import Depends
from dependency_injector.wiring import Provide
# Container, Index, Buckets and Object are application-specific types defined elsewhere.
async def objects_get(bucket: Optional[str] = None,
index: Index = Depends(Provide[Container.index]),
buckets: Buckets = Depends(Provide[Container.buckets])) -> List[Object]:
"""
searches for objects
"""
if not bucket:
return index.get_all()
buckets.validate_bucket(bucket)
return index.get_all(bucket) | f6949922ac5c355469fbaf450758180ac422f33a | 8,352 |
import json
def thumbnail_create(request, repo_id):
"""create thumbnail from repo file list
return thumbnail src
"""
content_type = 'application/json; charset=utf-8'
result = {}
repo = get_repo(repo_id)
if not repo:
err_msg = _(u"Library does not exist.")
return HttpResponse(json.dumps({"error": err_msg}), status=400,
content_type=content_type)
path = request.GET.get('path', None)
if not path:
err_msg = _(u"Invalid arguments.")
return HttpResponse(json.dumps({"error": err_msg}), status=400,
content_type=content_type)
if repo.encrypted or not ENABLE_THUMBNAIL or \
check_folder_permission(request, repo_id, path) is None:
err_msg = _(u"Permission denied.")
return HttpResponse(json.dumps({"error": err_msg}), status=403,
content_type=content_type)
size = request.GET.get('size', THUMBNAIL_DEFAULT_SIZE)
success, status_code = generate_thumbnail(request, repo_id, size, path)
if success:
src = get_thumbnail_src(repo_id, size, path)
result['encoded_thumbnail_src'] = urlquote(src)
return HttpResponse(json.dumps(result), content_type=content_type)
else:
err_msg = _('Failed to create thumbnail.')
return HttpResponse(json.dumps({'err_msg': err_msg}),
status=status_code, content_type=content_type) | 876f9af7d61336f0f91f0b7277943265fb6e7a35 | 8,353 |
import asyncio
def initialize_event_loop():
    """Attempt to use uvloop."""
    try:
        # Import here so a missing uvloop simply falls back to the default event loop.
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
return asyncio.get_event_loop() | cba229f5330bc89a60607f9523d3727b6db30094 | 8,354 |
def _setup_pgops(multi_actions=False,
normalise_entropy=False,
sequence_length=4,
batch_size=2,
num_mvn_actions=3,
num_discrete_actions=5):
"""Setup polices, actions, policy_vars and (optionally) entropy_scale_op."""
t = sequence_length
b = batch_size
a = num_mvn_actions
c = num_discrete_actions
# MVN actions
mu = tf.placeholder(tf.float32, shape=(t, b, a))
sigma = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_policies = tfp.distributions.MultivariateNormalDiag(
loc=mu, scale_diag=sigma)
mvn_actions = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_params = [mu, sigma]
if multi_actions:
# Create a list of n_cat Categorical distributions
n_cat = 2
cat_logits = [tf.placeholder(tf.float32, shape=(t, b, c))
for _ in xrange(n_cat)]
cat_policies = [tfp.distributions.Categorical(logits=logits)
for logits in cat_logits]
cat_actions = [tf.placeholder(tf.int32, shape=(t, b))
for _ in xrange(n_cat)]
cat_params = [[logits] for logits in cat_logits]
# Create an exponential distribution
exp_rate = tf.placeholder(tf.float32, shape=(t, b))
exp_policies = tfp.distributions.Exponential(rate=exp_rate)
exp_actions = tf.placeholder(tf.float32, shape=(t, b))
exp_params = [exp_rate]
# Nest all policies and nest corresponding actions and parameters
policies = [mvn_policies, cat_policies, exp_policies]
actions = [mvn_actions, cat_actions, exp_actions]
policy_vars = [mvn_params, cat_params, exp_params]
else:
# No nested policy structure
policies = mvn_policies
actions = mvn_actions
policy_vars = mvn_params
entropy_scale_op = None
if normalise_entropy:
# Scale op that divides by total action dims
def scale_op(policies):
policies = nest.flatten(policies)
num_dims = [tf.to_float(tf.reduce_prod(policy.event_shape_tensor()))
for policy in policies]
return 1. / tf.reduce_sum(tf.stack(num_dims))
entropy_scale_op = scale_op
return policies, actions, policy_vars, entropy_scale_op | 5d6ddc58db39496fed3c99c214c1f835ee49f2ea | 8,355 |
async def definition_delete(hub, ctx, name, **kwargs):
"""
.. versionadded:: 1.0.0
Delete a policy definition.
:param name: The name of the policy definition to delete.
CLI Example:
.. code-block:: bash
azurerm.resource.policy.definition_delete testpolicy
"""
result = False
polconn = await hub.exec.azurerm.utils.get_client(ctx, "policy", **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_definitions.delete(policy_definition_name=name)
result = True
except (CloudError, ErrorResponseException) as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
return result | 7f3e6b2b3b6bbcea590a042b923fd797a56840ee | 8,356 |
import optparse
def parse_options(argv):
"""Parses and checks the command-line options.
Returns:
A tuple containing the options structure and a list of categories to
be traced.
"""
usage = 'Usage: %prog [options] [category1 [category2 ...]]'
desc = 'Example: %prog -b 32768 -t 15 gfx input view sched freq'
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write trace output to FILE',
default=None, metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-l', '--list-categories', dest='list_categories',
default=False, action='store_true',
help='list the available categories and exit')
parser.add_option('-j', '--json', dest='write_json',
default=False, action='store_true',
help='write a JSON file')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true',
help='(deprecated)')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than'
'running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='(deprecated)')
parser.add_option('-e', '--serial', dest='device_serial_number',
default=_get_default_serial(),
type='string', help='adb device serial number')
parser.add_option('--target', dest='target', default='android', type='string',
help='chose tracing target (android or linux)')
parser.add_option('--timeout', dest='timeout', type='int',
help='timeout for start and stop tracing (seconds)')
parser.add_option('--collection-timeout', dest='collection_timeout',
type='int', help='timeout for data collection (seconds)')
atrace_ftrace_options = optparse.OptionGroup(parser,
'Atrace and Ftrace options')
atrace_ftrace_options.add_option('-b', '--buf-size', dest='trace_buf_size',
type='int', help='use a trace buffer size '
' of N KB', metavar='N')
atrace_ftrace_options.add_option('--no-fix-threads', dest='fix_threads',
default=True, action='store_false',
help='don\'t fix missing or truncated '
'thread names')
atrace_ftrace_options.add_option('--no-fix-tgids', dest='fix_tgids',
default=True, action='store_false',
help='Do not run extra commands to restore'
' missing thread to thread group id '
'mappings.')
atrace_ftrace_options.add_option('--no-fix-circular', dest='fix_circular',
default=True, action='store_false',
help='don\'t fix truncated circular traces')
parser.add_option_group(atrace_ftrace_options)
# Add the other agent parsing options to the parser. For Systrace on the
# command line, all agents are added. For Android, only the compatible agents
# will be added.
for module in ALL_MODULES:
option_group = module.add_options(parser)
if option_group:
parser.add_option_group(option_group)
options, categories = parser.parse_args(argv[1:])
if options.output_file is None:
options.output_file = 'trace.json' if options.write_json else 'trace.html'
if options.link_assets or options.asset_dir != 'trace-viewer':
parser.error('--link-assets and --asset-dir are deprecated.')
if options.trace_time and options.trace_time < 0:
parser.error('the trace time must be a non-negative number')
if (options.trace_buf_size is not None) and (options.trace_buf_size <= 0):
parser.error('the trace buffer size must be a positive number')
return (options, categories) | df6910fb6600f8c4573eced74dfcd8bc6ec1a5ad | 8,357 |
import json
import os
import socket
import dpkt
def get_data_for_file(folder, ports):
"""Parses the pcap files in the specified folder, and outputs data for the specified ports
"""
# Load private keys and port->provider mappings
keys, providers, nodes = read_keys(os.path.join(folder, 'keys'))
print 'Loading packets'
# Load packets
with open(os.path.join(folder, 'network.pcap'), 'rb') as f:
cap = dpkt.pcap.Reader(f)
packets = []
for ts, buf in cap:
eth = dpkt.sll.SLL(buf)
if eth.type != 3:
# tcpdump captures both type 3 and 4 packets, resulting in duplicates
continue
eth.time = ts
try:
eth.data.src = socket.inet_ntoa(eth.data.src)
eth.data.dst = socket.inet_ntoa(eth.data.dst)
except:
pass
packets.append(eth)
# Load config
config = json.load(open(os.path.join(folder, 'config.json')))
# Invert exponential parameters to get rate
loops, drop, payload = 1/config['EXP_PARAMS_LOOPS'], 1 / \
config['EXP_PARAMS_DROP'], 1/config['EXP_PARAMS_PAYLOAD']
lambda_total = loops + drop + payload
print "λ_loop = %f, λ_drop = %f, λ_payload = %f, λ = %f" % (
loops, drop, payload, lambda_total)
data = []
for port in ports:
print "Parsing port %d from %s" % (port, folder)
# Filter packets by source port
filtered = [x for x in packets if x.data.data.sport == port]
print "Analysing all packets"
all_mean = analyse_packets(filtered, packets[0].time, packets[-1].time)
print "-----------------"
decrypted_filtered = [(x, decrypt_packet(
x, keys[nodes[get_addr(x.data.dst, x.data.data.dport)]], keys)) for x in filtered]
real_filtered = [
x for x, decrypt in decrypted_filtered if decrypt[0] == 'REAL']
if len(real_filtered) == 0:
print "Warning, 0 real packets"
real_mean = None
else:
print "Analysing real packets"
real_mean = analyse_packets(
real_filtered, packets[0].time, packets[-1].time)
print "\n-----------------\n"
data.append((port, loops, drop, payload,
lambda_total, all_mean, real_mean))
return data | be81404b54231bde3444d3b3545cafa1c836c074 | 8,358 |
from asgiref.sync import sync_to_async
# User and Channel are Django models, and Context and GameConfig are application
# types referenced by name; all are defined elsewhere in the project.
async def create_channel_in_db(
context: 'Context',
game_config: 'GameConfig',
channel_id: str,
finished: bool = False
) -> Channel:
"""Utility function to create a channel in the database
:param context: The Discord Context.
:param game_config: The GameConfig to use for extra info.
:param finished: Whether or not the channel is finished.
:return: The Channel that was created"""
owner = (await sync_to_async(User.objects.get_or_create)(id=context.author.id))[0]
return await sync_to_async(Channel.objects.create)(
id=channel_id,
owner=owner, guild_id=context.guild.id, game=game_config.game,
finished=finished
) | a2ea09b436c4eeabd37c0c8220d791d89db3912f | 8,359 |
def retry(times, func, *args, **kwargs):
"""Try to execute multiple times function mitigating exceptions.
:param times: Amount of attempts to execute function
:param func: Function that should be executed
:param args: *args that are passed to func
:param kwargs: **kwargs that are passed to func
:raises Exception: Raise any exception that can raise func
:returns: Result of func(*args, **kwargs)
"""
for i in range(times):
try:
return func(*args, **kwargs)
except Exception:
if i == times - 1:
raise | 7e9fd482a70409d62ea108ddaa83440fcd2b024f | 8,360 |
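# Usage sketch for retry() above (int() is just an illustrative flaky callable):
value = retry(3, int, "42")       # succeeds on the first attempt, returns 42
# retry(3, int, "not a number")   would re-raise ValueError after the 3rd failed attempt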
import networkx as nx
def _apply_prediction(G, func, ebunch=None):
"""Applies the given function to each edge in the specified iterable
of edges.
`G` is an instance of :class:`networkx.Graph`.
`ebunch` is an iterable of pairs of nodes. If not specified, all
non-edges in the graph `G` will be used.
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return sorted([(u, v, func(G, u, v)) for u, v in ebunch], key = lambda t:t[2], reverse = True) | 5e046bf7608337f6ed046a71b8a3983f53109d46 | 8,361 |
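# Usage sketch for _apply_prediction() above: score every non-edge of a small graph
# by its number of common neighbours (using the networkx import above).
G = nx.path_graph(4)
scored = _apply_prediction(G, lambda g, u, v: len(list(nx.common_neighbors(g, u, v))))
# e.g. [(0, 2, 1), (1, 3, 1), (0, 3, 0)] -- highest scores first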
def import_class(class_object):
"""
Import a class given a string with its name in the format module.module.classname
"""
d = class_object.rfind(".")
class_name = class_object[d + 1:len(class_object)]
m = __import__(class_object[0:d], globals(), locals(), [class_name])
return getattr(m, class_name) | 82df3ed7d646bd423ccefacc00493e917f13c430 | 8,362 |
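# Example for import_class() above: the dotted path is split at the last "." into
# module and class name.
OrderedDict = import_class("collections.OrderedDict")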
def anonymous_fun_0_(empty_closure_0_):
"""
empty_closure_0_: ()
"""
def anonymous_fun_1_(par_map_input_1_):
"""
par_map_input_1_: Double
"""
def anonymous_fun_2_(par_map_input_0_):
"""
par_map_input_0_: Double
"""
def anonymous_fun_3_(fused_input_0_):
"""
fused_input_0_: (Double,Double)
"""
def anonymous_fun_4_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_0_ = maybeRow_1_
else:
cond_result_0_ = 0.0
return cond_result_0_
def anonymous_fun_5_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_1_ = row_1_
else:
cond_result_1_ = None
return cond_result_1_
def anonymous_fun_6_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_2_ = maybeRow_0_
else:
cond_result_2_ = 0.0
return cond_result_2_
def anonymous_fun_7_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_3_ = row_0_
else:
cond_result_3_ = None
return cond_result_3_
return ((fun_comp(anonymous_fun_4_,anonymous_fun_5_))(fused_input_0_[0]),(fun_comp(anonymous_fun_6_,anonymous_fun_7_))(fused_input_0_[1]))
def anonymous_fun_8_(dbrow_0_):
"""
dbrow_0_: Double
"""
return (dbrow_0_,dbrow_0_)
def anonymous_fun_9_(fused_input_1_):
"""
fused_input_1_: (Double,Double)
"""
def anonymous_fun_10_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_4_ = maybeRow_1_
else:
cond_result_4_ = 0.0
return cond_result_4_
def anonymous_fun_11_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_5_ = row_1_
else:
cond_result_5_ = None
return cond_result_5_
def anonymous_fun_12_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_6_ = maybeRow_0_
else:
cond_result_6_ = 0.0
return cond_result_6_
def anonymous_fun_13_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_7_ = row_0_
else:
cond_result_7_ = None
return cond_result_7_
return ((fun_comp(anonymous_fun_10_,anonymous_fun_11_))(fused_input_1_[0]),(fun_comp(anonymous_fun_12_,anonymous_fun_13_))(fused_input_1_[1]))
def anonymous_fun_14_(dbrow_1_):
"""
dbrow_1_: Double
"""
return (dbrow_1_,dbrow_1_)
return ((fun_comp(anonymous_fun_3_,anonymous_fun_8_))(par_map_input_0_),(fun_comp(anonymous_fun_9_,anonymous_fun_14_))(par_map_input_0_))
def anonymous_fun_15_(fused_input_2_):
"""
fused_input_2_: (Double,Double)
"""
def anonymous_fun_16_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_8_ = maybeRow_1_
else:
cond_result_8_ = 0.0
return cond_result_8_
def anonymous_fun_17_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_9_ = row_1_
else:
cond_result_9_ = None
return cond_result_9_
def anonymous_fun_18_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_10_ = maybeRow_0_
else:
cond_result_10_ = 0.0
return cond_result_10_
def anonymous_fun_19_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_11_ = row_0_
else:
cond_result_11_ = None
return cond_result_11_
return ((fun_comp(anonymous_fun_16_,anonymous_fun_17_))(fused_input_2_[0]),(fun_comp(anonymous_fun_18_,anonymous_fun_19_))(fused_input_2_[1]))
def anonymous_fun_20_(dbrow_2_):
"""
dbrow_2_: Double
"""
return (dbrow_2_,dbrow_2_)
return (anonymous_fun_2_(par_map_input_1_),(fun_comp(anonymous_fun_15_,anonymous_fun_20_))(par_map_input_1_))
return anonymous_fun_1_ | 2ea827153fadc359d056f4b981dbb5d3bf3711ee | 8,363 |
def get_examples(mode='train'):
"""
dataset[0][0] examples
"""
examples = {
'train':
({'id': '0a25cb4bc1ab6f474c699884e04601e4', 'title': '', 'context': '第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,'
'也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,'
'清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,'
'仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,'
'抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。', 'question': '仙剑奇侠传3第几集上天界', 'answers': ['第35集'], 'answer_starts': [0]}),
}
return examples[mode] | 0b5fb45bcac847cd3f7e7b3e5b264e350c891211 | 8,364 |
def display_tables(tables, max_rows=10, datetime_fmt='%Y-%m-%d %H:%M:%S', row=True):
    """Display multiple tables side by side on a Jupyter Notebook.
Args:
tables (dict[str, DataFrame]):
``dict`` containing table names and pandas DataFrames.
max_rows (int):
Max rows to show per table. Defaults to 10.
datetime_fmt (str):
Format with which to display datetime columns.
"""
    # Import here to avoid making IPython a hard dependency
    from IPython.core.display import HTML
names = []
data = []
for name, table in tables.items():
table = table.copy()
for column in table.columns:
column_data = table[column]
if column_data.dtype.kind == 'M':
table[column] = column_data.dt.strftime(datetime_fmt)
names.append('<td style="text-align:left"><b>{}</b></td>'.format(name))
data.append('<td>{}</td>'.format(table.head(max_rows).to_html(index=False)))
if row:
html = '<table><tr>{}</tr><tr>{}</tr></table>'.format(
''.join(names),
''.join(data),
)
else:
rows = [
'<tr>{}</tr><tr>{}</tr>'.format(name, table)
for name, table in zip(names, data)
]
html = '<table>{}</table>'.format(''.join(rows))
return HTML(html) | 904a900e97aab4809ea5057025a5a7a075429942 | 8,365 |
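# Usage sketch for display_tables() above (inside a notebook; users_df and orders_df
# are hypothetical DataFrames):
# display_tables({"users": users_df, "orders": orders_df}, max_rows=5)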
def prepare_default_result_dict(key, done, nodes):
"""Prepares the default result `dict` using common values returned by any
operation on the DHT.
Returns:
dict: with keys `(k, d, n)` for the key, done and nodes; `n` is a list
of `dict` with keys `(i, a, x)` for id, address, and expiration.
"""
d = {
"k": key,
"d": done,
}
nb = []
for n in nodes:
_node = n.getNode()
nb.append({
"i": n.getId().toString(),
"a": _node.getAddr(),
"x": _node.isExpired()
})
d["n"] = nb
return d | 420beb66352fee7b4d38f6b4cf628cbaa86a03df | 8,367 |
def MatchScorer(match, mismatch):
"""Factory function that returns a score function set to match and mismatch.
match and mismatch should both be numbers. Typically, match should be
positive and mismatch should be negative.
Resulting function has signature f(x,y) -> number.
"""
def scorer(x, y):
if x == y:
return match
else:
return mismatch
return scorer | fe3829efc64cb4d9785e52b8af6949c147481902 | 8,368 |
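# Example for MatchScorer() above: reward matches with +1, penalise mismatches with -1.
score = MatchScorer(1, -1)
score('A', 'A')   # -> 1
score('A', 'G')   # -> -1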
import random
def random_choice(context: RuntimeContext, *choices):
"""Template helper for random choices.
Supports structures like this:
random_choice:
- a
- b
- <<c>>
Or like this:
random_choice:
- choice:
pick: A
probability: 50%
- choice:
pick: A
probability: 50%
Probabilities are really just weights and don't need to
add up to 100.
Pick-items can have arbitrary internal complexity.
Pick-items are lazily evaluated.
"""
if not choices:
raise ValueError("No choices supplied!")
if getattr(choices[0], "function_name", None) == "choice":
choices = [choice.render(context) for choice in choices]
rc = weighted_choice(choices)
else:
rc = random.choice(choices)
if hasattr(rc, "render"):
rc = rc.render(context)
return rc | cc74c4106e2263e4b46ef25ed5cb83839040bb5f | 8,369 |
def _compute_paddings(height_pad_amt, width_pad_amt, patch_axes):
"""Convert the total pad amounts to the format needed by tf.pad()."""
top_pad = height_pad_amt // 2
bottom_pad = height_pad_amt - top_pad
left_pad = width_pad_amt // 2
right_pad = width_pad_amt - left_pad
paddings = [[0, 0] for _ in range(4)]
paddings[patch_axes[0]] = [top_pad, bottom_pad]
paddings[patch_axes[1]] = [left_pad, right_pad]
return paddings | 3a5154ba0fa6808bc6dc8e20fcb4203324762ba9 | 8,370 |
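# Worked example for _compute_paddings() above: a total pad of 3 rows and 2 columns on
# an NHWC tensor (patch axes 1 and 2) splits into top=1/bottom=2 and left=1/right=1.
_compute_paddings(3, 2, (1, 2))   # -> [[0, 0], [1, 2], [1, 1], [0, 0]]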
def tab_size(computer, name, value):
"""Compute the ``tab-size`` property."""
if isinstance(value, int):
return value
else:
return length(computer, name, value) | f121cc308f4c88e021e240767ae03479a26a46f6 | 8,371 |
def match_complete(user_id=""):
"""Switch 'complete' to true in matches table for user, return tallies."""
print("match_complete", user_id)
user = sm.get_user(user_id)
# Note: 0/1 used for 'complete' b/c Booleans not allowed in SimpleObjects
this_match, i = current_match_i(user)
temp = this_match['complete']
temp[i] = 1
this_match['complete'] = temp
return _get_tallies(user) | 1af499c671f209ba8bc9333e372947c90b9a2b8c | 8,372 |
def compute_range_map(flow,
downsampling_factor=1,
reduce_downsampling_bias=True,
resize_output=True):
"""Count how often each coordinate is sampled.
Counts are assigned to the integer coordinates around the sampled coordinates
using weights from bilinear interpolation.
Args:
flow: A float tensor of shape (batch size x height x width x 2) that
represents a dense flow field.
downsampling_factor: An integer, by which factor to downsample the output
resolution relative to the input resolution. Downsampling increases the
bin size but decreases the resolution of the output. The output is
normalized such that zero flow input will produce a constant ones output.
reduce_downsampling_bias: A boolean, whether to reduce the downsampling bias
near the image boundaries by padding the flow field.
resize_output: A boolean, whether to resize the output at the input
resolution.
Returns:
A float tensor of shape [batch_size, height, width, 1] that denotes how
often each pixel is sampled.
"""
# Get input shape.
input_shape = flow.shape.as_list()
if len(input_shape) != 4:
raise NotImplementedError()
batch_size, input_height, input_width, _ = input_shape
flow_height = input_height
flow_width = input_width
# Apply downsampling (and move the coordinate frame appropriately).
output_height = input_height // downsampling_factor
output_width = input_width // downsampling_factor
if downsampling_factor > 1:
# Reduce the bias that comes from downsampling, where pixels at the edge
# will get lower counts that pixels in the middle of the image, by padding
# the flow field.
if reduce_downsampling_bias:
p = downsampling_factor // 2
flow_height += 2 * p
flow_width += 2 * p
# Apply padding in multiple steps to padd with the values on the edge.
for _ in range(p):
flow = tf.pad(
tensor=flow,
paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
mode='SYMMETRIC')
coords = flow_to_warp(flow) - p
# Update the coordinate frame to the downsampled one.
coords = (coords + (1 - downsampling_factor) * 0.5) / downsampling_factor
elif downsampling_factor == 1:
    coords = flow_to_warp(flow)  # sampling coordinates implied by the flow
else:
raise ValueError('downsampling_factor must be an integer >= 1.')
# Split coordinates into an integer part and a float offset for interpolation.
  coords_floor = tf.floor(coords)  # largest integer not greater than each coordinate
coords_offset = coords - coords_floor
coords_floor = tf.cast(coords_floor, 'int32')
# Define a batch offset for flattened indexes into all pixels.
batch_range = tf.reshape(tf.range(batch_size), [batch_size, 1, 1])
idx_batch_offset = tf.tile(
batch_range, [1, flow_height, flow_width]) * output_height * output_width
# Flatten everything.
  coords_floor_flattened = tf.reshape(coords_floor, [-1, 2])  # integer parts
  coords_offset_flattened = tf.reshape(coords_offset, [-1, 2])  # fractional offsets
  idx_batch_offset_flattened = tf.reshape(idx_batch_offset, [-1])  # flat batch indices
# Initialize results.
idxs_list = []
weights_list = []
# Loop over differences di and dj to the four neighboring pixels.
for di in range(2):
for dj in range(2):
# Compute the neighboring pixel coordinates.
idxs_i = coords_floor_flattened[:, 0] + di
idxs_j = coords_floor_flattened[:, 1] + dj
# Compute the flat index into all pixels.
idxs = idx_batch_offset_flattened + idxs_i * output_width + idxs_j
# Only count valid pixels.
mask = tf.reshape(
tf.compat.v1.where(
tf.logical_and(
tf.logical_and(idxs_i >= 0, idxs_i < output_height),
tf.logical_and(idxs_j >= 0, idxs_j < output_width))), [-1])
valid_idxs = tf.gather(idxs, mask)
valid_offsets = tf.gather(coords_offset_flattened, mask)
# Compute weights according to bilinear interpolation.
weights_i = (1. - di) - (-1)**di * valid_offsets[:, 0]
weights_j = (1. - dj) - (-1)**dj * valid_offsets[:, 1]
weights = weights_i * weights_j
# Append indices and weights to the corresponding list.
idxs_list.append(valid_idxs)
weights_list.append(weights)
# Concatenate everything.
idxs = tf.concat(idxs_list, axis=0)
weights = tf.concat(weights_list, axis=0)
# Sum up weights for each pixel and reshape the result.
counts = tf.math.unsorted_segment_sum(
weights, idxs, batch_size * output_height * output_width)
count_image = tf.reshape(counts, [batch_size, output_height, output_width, 1])
if downsampling_factor > 1:
# Normalize the count image so that downsampling does not affect the counts.
count_image /= downsampling_factor**2
if resize_output:
count_image = resize(
count_image, input_height, input_width, is_flow=False)
return count_image | fa73194435ae893dcd359f93f1488a6b654f8d31 | 8,373 |
from typing import List
import editdistance
def get_wer(refs: List[str], hyps: List[str]):
"""
args:
refs (list of str): reference texts
hyps (list of str): hypothesis/prediction texts
"""
n_words, n_errors = 0, 0
for ref, hyp in zip(refs, hyps):
ref, hyp = ref.split(), hyp.split()
n_words += len(ref)
n_errors += editdistance.eval(ref, hyp)
return safe_divide(n_errors, n_words) | fb142f4d048bffca1a1119e4a2e7c68e1effcbfc | 8,374 |
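# Usage sketch for get_wer() above (safe_divide is assumed to be a helper defined
# elsewhere that guards against a zero word count):
get_wer(["the cat sat"], ["the cat sit"])   # -> 1/3, one substitution over three words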
def get_first(somelist, function):
""" Returns the first item of somelist for which function(item) is True """
for item in somelist:
if function(item):
return item
return None | 81976910c46102d3b15803d215f3bf5a554f9beb | 8,375 |
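# Example for get_first() above: the first even number wins.
get_first([1, 3, 4, 5], lambda x: x % 2 == 0)   # -> 4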
import numpy as np
def np_cross(a, b):
"""
Simple numba compatible cross product of vectors
"""
return np.array([
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0],
]) | 0d19a1bfdf7bf5d6835203f61654edc8263b3bbd | 8,376 |
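# Example for np_cross() above: the cross product of the x and y unit vectors is z.
np_cross(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))   # -> array([0., 0., 1.])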
import itertools
def remove_duplicates(llist):
"""
Removes any and all duplicate entries in the specified list.
This function is intended to be used during dataset merging and
therefore must be able to handle list-of-lists.
:param llist: The list to prune.
:return: A list of unique elements only.
"""
if not llist:
return []
llist.sort()
return [x for x, _ in itertools.groupby(llist)] | cbdf1a4db99a7a5fac37f25776cc1387ed8c54e0 | 8,379 |
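# Example for remove_duplicates() above (note the input list is sorted as a side effect):
remove_duplicates([[1, 2], [0], [1, 2]])   # -> [[0], [1, 2]]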
def images_in_bbox(bbox: dict, **filters) -> str:
"""
Gets a complete list of images with custom filter within a BBox
:param bbox: Bounding box coordinates
Format::
>>> {
... 'west': 'BOUNDARY_FROM_WEST',
... 'south': 'BOUNDARY_FROM_SOUTH',
... 'east': 'BOUNDARY_FROM_EAST',
... 'north': 'BOUNDARY_FROM_NORTH'
... }
:type bbox: dict
:param filters: Different filters that may be applied to the output
Example filters::
- max_captured_at
- min_captured_at
- image_type: pano, flat, or all
- compass_angle
- sequence_id
- organization_id
:type filters: dict
:return: Output is a GeoJSON string that represents all the within a bbox after passing given
filters
:rtype: str
Usage::
>>> import mapillary as mly
>>> mly.interface.set_access_token('MLY|XXX')
>>> mly.interface.images_in_bbox(
... bbox={
... 'west': 'BOUNDARY_FROM_WEST',
... 'south': 'BOUNDARY_FROM_SOUTH',
... 'east': 'BOUNDARY_FROM_EAST',
... 'north': 'BOUNDARY_FROM_NORTH'
... },
... max_captured_at='YYYY-MM-DD HH:MM:SS',
... min_captured_at='YYYY-MM-DD HH:MM:SS',
... image_type='pano',
... compass_angle=(0, 360),
... sequence_id='SEQUENCE_ID',
... organization_id='ORG_ID'
... )
"""
return image.get_images_in_bbox_controller(
bounding_box=bbox, layer="image", zoom=14, filters=filters
) | 317554d0f666753cdfc8a3657f7f0b92d5af141d | 8,380 |
def find_start_time_from_afl(project_base_dir):
"""
Finds the start time of a project from afl directories.
This time is taken from the fuzzer_stats entry of
the first config iteration's fuzzer.
"""
try:
first_main_dir = main_dirs_for_proj(project_base_dir)[0]
    except IndexError:
        # the fuzzware-project dir exists but contains no mainXXX dirs
return 0
first_fuzzer_dir = fuzzer_dirs_for_main_dir(first_main_dir)[0]
fuzzer_stats_path = first_fuzzer_dir.joinpath("fuzzer_stats")
with open(fuzzer_stats_path, "r") as f:
start_time = int(f.readline().split(": ")[1])
return start_time | f8f21b65e1901615e16953da48ac39008dcb240b | 8,381 |
def filter_none_values(d, recursive=True):
"""
Returns a filtered copy of a dict, with all keys associated with 'None' values removed.
adapted from: http://stackoverflow.com/q/20558699
adapted from: http://stackoverflow.com/a/20558778
:param d: a dict-like object.
:param recursive: If True, performs the operation recursively on inner elements of the object.
:return: a new dict (of the same type as the original) containing the original dict's values,
except as modified per this function's documented effects.
>>> filter_none_values(None) is None
True
>>> filter_none_values(1)
Traceback (most recent call last):
TypeError: d is not a dict-like object.
>>> filter_none_values({})
{}
>>> filter_none_values({'a': 1, 'b': None, 'c': '3'})
{'a': 1, 'c': '3'}
>>> filter_none_values({'a': 1, 'b': [1, None, 3], 'c': '3'})
{'a': 1, 'c': '3', 'b': [1, 3]}
>>> filter_none_values({'a': 1, 'b': [1, {'ba': 1, 'bb': None, 'bc': '3'}, 3], 'c': '3'})
{'a': 1, 'c': '3', 'b': [1, {'ba': 1, 'bc': '3'}, 3]}
>>> from collections import OrderedDict as od; filter_none_values(od((('a', 1), ('b', None), ('c', '3'))))
OrderedDict([('a', 1), ('c', '3')])
>>> from collections import OrderedDict as od; filter_none_values({'r': od((('a', 1), ('b', None), ('c', '3')))})
{'r': OrderedDict([('a', 1), ('c', '3')])}
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": null, "c": 3}')))
"{u'a': 1, u'c': 3}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": [], "c": 3}')))
"{u'a': 1, u'c': 3, u'b': []}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": null}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {}}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": []}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {u'ba': []}}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": {"baa": null}}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {u'ba': {}}}"
"""
# def my_remove_none(obj):
# """Note: adapted from remove_none."""
# if isinstance(obj, (collections.Sequence, list, tuple, set)):
# return type(obj)(remove_none(x) for x in obj if x is not None)
# elif isinstance(obj, (collections.Mapping, dict)):
# return type(obj)((remove_none(k), remove_none(v))
# for k, v in obj.items() if k is not None and v is not None)
# else:
# return obj
def remove_none(obj):
"""Note: This one seems to be functionally equivalent to purify (at least for the cases I tested)."""
if isinstance(obj, (list, tuple, set)):
return type(obj)(remove_none(x) for x in obj if x is not None)
elif isinstance(obj, dict):
return type(obj)((remove_none(k), remove_none(v))
for k, v in obj.items() if k is not None and v is not None)
else:
return obj
def purify(o):
"""Note: This one seems to be functionally equivalent to remove_none (at least for the cases I tested)."""
if hasattr(o, 'items'):
oo = type(o)()
for k in o:
if k is not None and o[k] is not None:
oo[k] = purify(o[k])
elif hasattr(o, '__iter__'):
oo = []
for it in o:
if it is not None:
oo.append(purify(it))
else:
return o
return type(o)(oo)
def strip_none(data):
"""Note: This one doesn't support OrderedDict, etc."""
if isinstance(data, dict):
return {k: strip_none(v) for k, v in data.items() if k is not None and v is not None}
elif isinstance(data, list):
return [strip_none(item) for item in data if item is not None]
elif isinstance(data, tuple):
return tuple(strip_none(item) for item in data if item is not None)
elif isinstance(data, set):
return {strip_none(item) for item in data if item is not None}
else:
return data
if d is None:
return None
elif not hasattr(d, 'items'):
raise TypeError('d is not a dict-like object.')
if recursive:
# return my_remove_none(d)
# return remove_none(d)
return purify(d)
# return strip_none(d)
else:
d = d.copy()
# remove all bad keys
bad_keys = [k for k, v in d.items() if v is None]
for k in bad_keys:
d.pop(k)
return d | 2a25ae331c99196c6f6eed7d5fe055f27583b1d2 | 8,382 |
def nonseq():
""" Return non sequence """
return 1 | 7c8f4a616a6761153226d961be02f6cf5b0cc54a | 8,383 |
import cv2
# io here is scikit-image's io module (the standard-library io has no imread).
from skimage import io
def load_image(path, color_space = None, target_size = None):
    """Loads an image as a numpy array
    Arguments:
        path: Path to image file
        color_space: Currently unused
        target_size: Either None (defaults to the original size)
            or a tuple of ints passed to cv2.resize as (width, height)
    """
img = io.imread(path)
if target_size:
img = cv2.resize(img, target_size, interpolation = cv2.INTER_CUBIC)
return img | 882210dff5dfa46596562483966b4a72c37aa7a8 | 8,384 |
def kubernetes_node_label_to_dict(node_label):
"""Load Kubernetes node label to Python dict."""
if node_label:
label_name, value = node_label.split("=")
return {label_name: value}
return {} | c856d4e6d1f2169f7028ce842edc881cbca4e783 | 8,385 |
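# Example for kubernetes_node_label_to_dict() above:
kubernetes_node_label_to_dict("disktype=ssd")   # -> {"disktype": "ssd"}
kubernetes_node_label_to_dict("")               # -> {}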
def get_category_user_problem(cat_name, username):
"""
    Get the problems directly under the given category that the user has solved (AC),
    attempted but not yet solved, and not yet tried.
:param cat_name:
:param username:
:return:
"""
cat = __Category.objects.filter(name=cat_name).first()
user = __User.objects.filter(username=username).first()
if user is None or cat is None:
return {'solved': [], 'not_solved': [], 'not_tried': []}
query_dict = {}
relation = __ProblemUserRelation.objects.filter(user=user).values('problem_id', 'solved').distinct()
for i in relation:
query_dict[i['problem_id']] = i['solved']
problems = cat.problem.filter(category_relation__direct=True).values('id', 'title')
solved = []
not_solved = []
not_tried = []
for i in problems:
if i['id'] in query_dict:
if query_dict[i['id']] is True:
solved.append(i)
else:
not_solved.append(i)
else:
not_tried.append(i)
return {'solved': solved, 'not_solved': not_solved, 'not_tried': not_tried} | 96e78d527f5bd0002345973eb085e04246e936ae | 8,386 |