| content | sha1 | id |
|---|---|---|
| stringlengths 35 to 762k | stringlengths 40 | int64 0 to 3.66M |
def analyze(model, Y, print_to_console=True):
"""
Perform variance-based sensitivity analysis for each process.
Parameters
----------
model : object
The model defined in sammpy
Y : numpy.array
A NumPy array containing the model outputs
print_to_console : bool
Print results directly to console (default True)
Returns
----------
Returns a dictionary with keys 'mean' and 'variance', where
each entry is a list with one value per process.
"""
# Number of sample realizations
obs = Y.shape[1]
# Number of process and process models
npros = len(model.frames['names'])
# Create a dict to store the results
S = create_si_dict(npros)
# Perform the difference-based process sensitivity analysis
if print_to_console:
print('Running MMDS difference-based process sensitivity analysis...')
MMDS = mmds_mean_var(model, Y)
# Save results to the dict
for i in range(npros):
S['mean'][i] = MMDS[0, i]
S['variance'][i] = MMDS[1, i]
# Print results to console
if print_to_console:
print_indices(model, S)
return S | 119c00becb1c3b507e35cbcecd98762fcb924521 | 11,767 |
import copy
def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False,
**kwargs):
"""
Calculates the Morgan fingerprint with the environments of atomId removed.
Parameters:
mol -- the molecule of interest
radius -- the maximum radius
fpType -- the type of Morgan fingerprint: 'count' or 'bv'
atomId -- the atom to remove the environments for (if -1, no environments are removed)
nBits -- the size of the bit vector (only for fpType = 'bv')
useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan
any additional keyword arguments will be passed to the fingerprinting function.
"""
if fpType not in ['bv', 'count']:
raise ValueError("Unknown Morgan fingerprint type")
if not hasattr(mol, '_fpInfo'):
info = {}
# get the fingerprint
if fpType == 'bv':
molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits, useFeatures=useFeatures,
bitInfo=info, **kwargs)
else:
molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info,
**kwargs)
# construct the bit map
if fpType == 'bv':
bitmap = [DataStructs.ExplicitBitVect(nBits) for _ in range(mol.GetNumAtoms())]
else:
bitmap = [[] for _ in range(mol.GetNumAtoms())]
for bit, es in info.items():
for at1, rad in es:
if rad == 0: # for radius 0
if fpType == 'bv':
bitmap[at1][bit] = 1
else:
bitmap[at1].append(bit)
else: # for radii > 0
env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)
amap = {}
Chem.PathToSubmol(mol, env, atomMap=amap)
for at2 in amap.keys():
if fpType == 'bv':
bitmap[at2][bit] = 1
else:
bitmap[at2].append(bit)
mol._fpInfo = (molFp, bitmap)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms():
raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2:
raise ValueError("_fpInfo not set")
if fpType == 'bv':
molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor
else: # count
molFp = copy.deepcopy(mol._fpInfo[0])
# delete the bits with atomId
for bit in mol._fpInfo[1][atomId]:
molFp[bit] -= 1
return molFp | 9fd8077c4f35c83e8996a53981f99baa0e4510a6 | 11,768 |
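A brief usage sketch for `GetMorganFingerprint` above, assuming the RDKit imports the snippet relies on (`Chem`, `DataStructs`, and `rdMD` as `rdkit.Chem.rdMolDescriptors`); the molecule and atom index are illustrative only.

```python
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors as rdMD

# Phenol as an example molecule.
mol = Chem.MolFromSmiles('c1ccccc1O')

# Full fingerprint vs. the fingerprint with atom 0's environments removed.
fp_full = GetMorganFingerprint(mol, radius=2, fpType='bv', nBits=2048)
fp_drop0 = GetMorganFingerprint(mol, atomId=0, radius=2, fpType='bv', nBits=2048)

# The similarity drop indicates how much atom 0 contributes to the fingerprint,
# which is the idea behind similarity-map style atom weights.
print(DataStructs.TanimotoSimilarity(fp_full, fp_drop0))
```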
import math
def _rgb2lab(rgb):
"""Convert an RGB integer to Lab tuple"""
def xyzHelper(value):
"""Helper function for XYZ colourspace conversion"""
c = value / 255
if c > 0.0445:
c = (c + 0.055) / 1.055
c = math.pow(c, 2.4)
else:
c /= 12.92
c *= 100
return c
def labHelper(value):
"""Helper function for Lab colourspace conversion"""
c = value
if c > 0.008856:
c = math.pow(c, 1.0 / 3.0)
else:
c = (7.787 * c) + (16.0 / 116.0)
return c
# convert into XYZ colourspace
c1 = xyzHelper((rgb >> 16) & 0xFF)
c2 = xyzHelper((rgb >> 8) & 0xFF)
c3 = xyzHelper(rgb & 0xFF)
x = (c1 * 0.4124) + (c2 * 0.3576) + (c3 * 0.1805)
y = (c1 * 0.2126) + (c2 * 0.7152) + (c3 * 0.0722)
z = (c1 * 0.0193) + (c2 * 0.1192) + (c3 * 0.9505)
# convert into Lab colourspace
c1 = labHelper(x / 95.047)
c2 = labHelper(y / 100.0)
c3 = labHelper(z / 108.883)
l = (116.0 * c2) - 16
a = 500.0 * (c1 - c2)
b = 200.0 * (c2 - c3)
return LabColour(l, a, b) | a663370e3908daa9ba795bb0dc2ecb945653221e | 11,769 |
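A small usage sketch for `_rgb2lab`; `LabColour` is not defined in the snippet, so a plain namedtuple is assumed here as a stand-in.

```python
from collections import namedtuple

# Hypothetical stand-in for the LabColour type returned above.
LabColour = namedtuple('LabColour', ['l', 'a', 'b'])

# Pure white (0xFFFFFF) maps to roughly L=100, a=0, b=0.
print(_rgb2lab(0xFFFFFF))

# Pure red (0xFF0000) yields a strongly positive a* component.
print(_rgb2lab(0xFF0000))
```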
import torch
def biband_mask(n: int, kernel_size: int, device: torch.device, v=-1e9):
"""compute mask for local attention with kernel size.
Args:
n (int): The input length.
kernel_size (int): The local attention kernel size.
device (torch.device): The device to put the mask on.
Returns: torch.Tensor. shape: [n,n]. The masked locations are -1e9
and unmasked locations are 0.
"""
if kernel_size is None:
return None
half = kernel_size // 2
mask1 = torch.ones(n, n).triu(diagonal=-half)
mask2 = torch.ones(n, n).tril(diagonal=half)
mask = mask1 * mask2
mask = (1 - mask) * v
return mask.to(device) | ab3a5f25f9fe0f83579d0492caa2913a13daa2d7 | 11,771 |
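A quick check of `biband_mask` on CPU: with `n=5` and `kernel_size=3` the entries within one step of the diagonal are 0 and everything else is -1e9, so adding the mask to attention scores before the softmax suppresses positions outside the local window.

```python
import torch

mask = biband_mask(5, kernel_size=3, device=torch.device('cpu'))
print(mask)  # 0 inside the width-3 band around the diagonal, -1e9 elsewhere

# Typical use: add to raw attention scores before the softmax.
scores = torch.randn(5, 5)
weights = torch.softmax(scores + mask, dim=-1)
```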
def containsIfElse(node):
""" Checks whether the given node contains another if-else-statement """
if node.type == "if" and hasattr(node, "elsePart"):
return True
for child in node:
if child is None:
pass
# Blocks reset this if-else problem so we ignore them
# (and their content) for our scan.
elif child.type == "block":
pass
# Script blocks reset as well (protected by other function)
elif child.type == "script":
pass
elif containsIfElse(child):
return True
return False | 255f58fdf4abe69f10e9b433562ade12cb0bc215 | 11,772 |
def get_gitlab_scripts(data):
"""GitLab is nice, as far as I can tell its files have a
flat hierarchy with many small job entities"""
def flatten_nested_string_lists(data):
"""helper function"""
if isinstance(data, str):
return data
elif isinstance(data, list):
return "\n".join([flatten_nested_string_lists(item) for item in data])
else:
raise ValueError(
f"unexpected data type {type(data)} in script section: {data}"
)
result = {}
for jobkey in data:
if not isinstance(data[jobkey], dict):
continue
for section in ["script", "before_script", "after_script"]:
if section in data[jobkey]:
script = data[jobkey][section]
result[f"{jobkey}/{section}"] = flatten_nested_string_lists(script)
return result | ad73c1ea6d4edcbce51eea18de317d7ab2d5e536 | 11,774 |
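An illustration of `get_gitlab_scripts` with a hand-written dictionary shaped like a parsed `.gitlab-ci.yml`; top-level keys whose values are not dicts (such as `stages`) are skipped, and nested script lists are flattened with newlines.

```python
ci_config = {
    "stages": ["build"],  # not a dict, so it is ignored
    "build-job": {
        "stage": "build",
        "before_script": ["apt-get update"],
        "script": ["make", ["make install", "make test"]],  # nested list
    },
}

print(get_gitlab_scripts(ci_config))
# {'build-job/script': 'make\nmake install\nmake test',
#  'build-job/before_script': 'apt-get update'}
```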
import new
def method(cls):
"""Adds the function as a method to the given class."""
def _wrap(f):
cls.__dict__[f.func_name] = new.instancemethod(f,None,cls)
return None
return _wrap | 0f746420bf9870dec5d8a5e69bcec414530fc1cb | 11,775 |
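The decorator above is Python 2 only (the `new` module and `f.func_name` are gone, and a class `__dict__` is not directly writable). A rough Python 3 equivalent of the same idea, written here as a hypothetical `method_py3`, uses `setattr` instead:

```python
def method_py3(cls):
    """Add the decorated function to the given class as a method (Python 3 sketch)."""
    def _wrap(f):
        setattr(cls, f.__name__, f)  # functions become bound methods on attribute access
        return None
    return _wrap


class Greeter:
    pass

@method_py3(Greeter)
def hello(self):
    return "hello"

print(Greeter().hello())  # "hello"
```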
def maps_from_echse(conf):
"""Produces time series of rainfall maps from ECHSE input data and catchment shapefiles.
"""
# Read sub-catchment rainfall from file
fromfile = np.loadtxt(conf["f_data"], dtype="string", delimiter="\t")
if len(fromfile)==2:
rowix = 1
elif len(fromfile)>2:
rowix = slice(1,len(fromfile))
else:
raise Exception("Data file is empty: %s" % conf["f_data"])
var = fromfile[rowix,1:].astype("f4")
dtimes = fromfile[rowix,0]
dtimes_file = np.array([wradlib.util.iso2datetime(dtime) for dtime in dtimes])
dtimesfromconf = wradlib.util.from_to(conf["tstart"], conf["tend"], conf["interval"])
dtimes = np.intersect1d(dtimes_file, dtimesfromconf)
if len(dtimes)==0:
print "No datetimes for mapping based on intersection of data file and config info."
return(0)
# objects = fromfile[0,1:]
cats = plt.genfromtxt(conf["f_coords"], delimiter="\t", names=True,
dtype=[('id', '|S20'), ('lat', 'f4'), ('lon', 'f4'),
('x', 'f4'), ('y', 'f4')])
mapx, mapy = wradlib.georef.reproject(cats["x"],cats["y"],
projection_source=conf["trg_proj"],
projection_target=conf["map_proj"])
# Read shapefile
dataset, inLayer = wradlib.io.open_shape(conf["f_cats_shp"])
polys, keys = wradlib.georef.get_shape_coordinates(inLayer, key='DN')
keys = np.array(keys)
# Preprocess polygons (remove minors, sort in same order as in coords file)
polys2 = []
for i, id in enumerate(cats["id"]):
keyix = np.where( keys==eval(id.strip("cats_")) )[0]
if len(keyix) > 1:
# More than one key matching? Find largest matching polygon
keyix = keyix[np.argmax([len(polys[key]) for key in keyix])]
else:
keyix = keyix[0]
poly = polys[keyix].copy()
if poly.ndim==1:
# Multi-Polygons - keep only the largest polygon
# (just for plotting - no harm done)
poly2 = poly[np.argmax([len(subpoly) for subpoly in poly])].copy()
else:
poly2 = poly.copy()
polys2.append ( wradlib.georef.reproject(poly2,
projection_source=conf["trg_proj"],
projection_target=conf["map_proj"]) )
colors = plt.cm.spectral(np.linspace(0,1,len(conf["levels"])))
mycmap, mynorm = from_levels_and_colors(conf["levels"], colors, extend="max")
plt.interactive(False)
for dtime in dtimes:
datestr = (dtime-dt.timedelta(seconds=conf["interval"])).strftime("%Y%m%d.png")
i = np.where(dtimes_file==dtime)[0][0]
print(datestr, i)
figpath = os.path.join(conf["savefigs"], datestr)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect="equal")
ax, coll = tl.vis.plot_cats(polys2, var[i], ax=ax, bbox=conf["bbox"], cmap=mycmap,
norm=mynorm, edgecolors='none')
cb = plt.colorbar(coll, ax=ax, ticks=conf["levels"], shrink=0.6)
cb.ax.tick_params(labelsize="small")
cb.set_label("(mm)")
plt.xlabel("Longitude")
plt.ylabel("Latitude")
tl.vis.plot_trmm_grid_lines(ax)
plt.text(conf["bbox"]["left"]+0.25, conf["bbox"]["top"]-0.25,
"%s\n%s to\n%s" % (conf["figtxtbody"],
(dtime-dt.timedelta(seconds=conf["interval"])).isoformat(" "),
dtime.isoformat(" ") ),
color="red", fontsize="small", verticalalignment="top")
plt.tight_layout()
plt.savefig(figpath)
plt.close()
plt.interactive(True) | 31e09c5bed2f7fe3e0d750a59137c05ef987dc2e | 11,776 |
def utility_assn(tfr_dfs):
"""Harvest a Utility-Date-State Association Table."""
# These aren't really "data" tables, and should not be searched for associations
non_data_dfs = [
"balancing_authority_eia861",
"service_territory_eia861",
]
# The dataframes from which to compile BA-Util-State associations
data_dfs = [tfr_dfs[table]
for table in tfr_dfs if table not in non_data_dfs]
logger.info("Building an EIA 861 Util-State-Date association table.")
tfr_dfs["utility_assn_eia861"] = _harvest_associations(
data_dfs, ["report_date", "utility_id_eia", "state"])
return tfr_dfs | 6b0357f1d7024bcfddac6981d968e67e5dbeba51 | 11,777 |
def is_smtp_enabled(backend=None):
"""
Check if the current backend is SMTP based.
"""
if backend is None:
backend = get_mail_backend()
return backend not in settings.SENTRY_SMTP_DISABLED_BACKENDS | 988d2173923dc53cd3179cf0866c702ab9fe69d4 | 11,778 |
import requests
def get_presentation_requests_received(tenant: str, state: str = ''):
"""
state: must be in ['proposal-sent', 'proposal-received', 'request-sent', 'request-received', 'presentation-sent', 'presentation-received', 'done', 'abandoned']
"""
possible_states = ['', 'proposal-sent', 'proposal-received', 'request-sent', 'request-received', 'presentation-sent', 'presentation-received', 'done', 'abandoned']
if state not in possible_states:
raise HTTPException(400, "state must be in: " + ", ".join(possible_states))
params = None
if state:
params = {
'state': state,
}
j = requests.get(ACAPY_API + '/present-proof-2.0/records', params=params, headers=prepare_headers(tenant=tenant)).json()
return j['results'] | 1157712b8e4df1b269892a2d3ec15dae366d8d71 | 11,779 |
def generate_round():
"""
Generate a round.
Returns:
question: The question to show to the user
answer: The correct answer to the question
"""
total_num, random_num = generate_numbers()
question = " ".join(total_num)
answer = str(random_num)
return question, answer | d4b535016e6ca6c6d673c1a6a2ee2c20eca87bc1 | 11,780 |
from datetime import datetime
def get_basic_activity():
"""
A basic set of activity records for a 'Cohort 1' and CoreParticipant participant.
"""
return [
{'timestamp': datetime(2018, 3, 6, 0, 0), 'group': 'Profile', 'group_id': 1,
'event': p_event.EHRFirstReceived},
{'timestamp': datetime(2018, 3, 6, 20, 20, 57), 'group': 'Profile', 'group_id': 1,
'event': p_event.SignupTime},
{'timestamp': datetime(2018, 3, 6, 20, 35, 12), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.ConsentPII, 'answer': 'ConsentPermission_Yes',
'answer_id': 767},
{'timestamp': datetime(2018, 3, 6, 20, 43, 50), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.EHRConsentPII, 'answer': 'ConsentPermission_Yes',
'answer_id': 767},
{'timestamp': datetime(2018, 3, 6, 20, 46, 48), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.TheBasics, 'ConsentAnswer': None},
{'timestamp': datetime(2018, 3, 6, 20, 49, 0), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.OverallHealth, 'ConsentAnswer': None},
{'timestamp': datetime(2018, 3, 6, 20, 51, 6), 'group': 'QuestionnaireModule', 'group_id': 40,
'event': p_event.Lifestyle, 'ConsentAnswer': None},
{'timestamp': datetime(2018, 3, 28, 20, 18, 59), 'group': 'Biobank', 'group_id': 20,
'event': p_event.BiobankConfirmed, 'dna_tests': 3, 'basline_tests': 4},
{'timestamp': datetime(2018, 5, 17, 2, 11, 37), 'group': 'Biobank', 'group_id': 20,
'event': p_event.BiobankOrder, 'dna_tests': 0, 'basline_tests': 0},
# ROC-295: duplicate record, manually cancelled
{'timestamp': datetime(2018, 5, 21, 18, 9, 8), 'group': 'Profile', 'group_id': 1,
'event': p_event.PhysicalMeasurements, 'status': 'CANCELLED', 'status_id': 2},
{'timestamp': datetime(2018, 5, 21, 18, 9, 12), 'group': 'Profile', 'group_id': 1,
'event': p_event.PhysicalMeasurements, 'status': 'COMPLETED', 'status_id': 1},
{'timestamp': datetime(2019, 6, 13, 0, 0), 'group': 'Profile', 'group_id': 1,
'event': p_event.EHRLastReceived}
] | 4ee13cf35326d6c09fb4174f0e4217b17a34a545 | 11,781 |
def bad_multi_examples_per_input_estimator_misaligned_input_refs(
export_path, eval_export_path):
"""Like the above (good) estimator, but the input_refs is misaligned."""
estimator = tf.estimator.Estimator(model_fn=_model_fn)
estimator.train(input_fn=_train_input_fn, steps=1)
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=_serving_input_receiver_fn,
eval_input_receiver_fn=_bad_eval_input_receiver_fn_misaligned_input_refs,
export_path=export_path,
eval_export_path=eval_export_path) | c08fac8d0ae8679db56128dc8d4a36a5492a6737 | 11,782 |
def caption_example(image):
"""Convert image caption data into an Example proto.
Args:
image: A ImageMetadata instance.
Returns:
example: An Example proto with serialized tensor data.
"""
# Collect image object information from metadata.
image_features, positions = read_object(image.objects, image.image_id)
# Serialize multi-dimensional tensor data.
captions_proto = tf.make_tensor_proto(np.array(image.captions))
features_proto = tf.make_tensor_proto(image_features)
positions_proto = tf.make_tensor_proto(positions)
# Create final features dict.
features = dict(
image_id=int64_feature(image.image_id),
captions=bytes_feature(captions_proto.SerializeToString()),
object_features=bytes_feature(features_proto.SerializeToString()),
object_positions=bytes_feature(positions_proto.SerializeToString()))
return tf.train.Example(features=tf.train.Features(feature=features)) | f989774a0d3321717cbb09f6342a6c86f5433c54 | 11,785 |
def GetAttributeTableByFid(fileshp, layername=0, fid=0):
"""
Return the attribute values and WKT geometry of the feature with the given fid.
"""
res = {}
dataset = ogr.OpenShared(fileshp)
if dataset:
layer = dataset.GetLayer(layername)
feature = layer.GetFeature(fid)
geom = feature.GetGeometryRef()
res["geometry"] = geom.ExportToWkt()
layerDefinition = layer.GetLayerDefn()
for j in range(layerDefinition.GetFieldCount()):
fieldname = layerDefinition.GetFieldDefn(j).GetName()
res[fieldname] = feature.GetField(j)
dataset = None
return res | 42b845ae5b1a3c9976262cc37f5854b80aa7b290 | 11,786 |
def get_root_folder_id(db, tree_identifier, linked_to, link_id):
"""Get id of the root folder for given data category and profile or user group
Args:
db (object): The db object
tree_identifier (str): The identifier of the tree
linked_to (str): ['profile'|'group']
link_id (int): The profile id or the group id (depending on linked_to)
Returns:
The id of the root folder.
"""
if linked_to not in ['profile', 'group']:
raise MSGException(Error.CORE_INVALID_PARAMETER,
"Incorrect 'linked_to' value.")
root_folder_id = None
SQL_PROFILE = """SELECT root_folder_id
FROM data_profile_tree
WHERE profile_id=? AND tree_identifier=?"""
SQL_USER_GROUP = """SELECT root_folder_id
FROM data_user_group_tree
WHERE user_group_id=? AND tree_identifier=?"""
sql = SQL_PROFILE if linked_to == 'profile' else SQL_USER_GROUP
res = db.execute(sql,
(link_id, tree_identifier)).fetch_one()
if res:
root_folder_id = res['root_folder_id']
return root_folder_id | 7378ec4852d90913282109dcce5d8168613c835e | 11,787 |
def str_cell(cell):
"""Get a nice string of given Cell statistics."""
result = f"-----Cell ({cell.x}, {cell.y})-----\n"
result += f"sugar: {cell.sugar}\n"
result += f"max sugar: {cell.capacity}\n"
result += f"height/level: {cell.level}\n"
result += f"Occupied by Agent {cell.agent.id if cell.agent else None}\n"
return result | d62801290321d5d2b8404dbe6243f2f0ae03ecef | 11,788 |
def get_idx_pair(mu):
"""get perturbation position"""
idx = np.where(mu != 0)[0]
idx = [idx[0], idx[-1]]
return idx | eed8b77f3f21af93c28c84d6f325dd2161740e6f | 11,789 |
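A small example of `get_idx_pair`: it returns the first and last indices at which the perturbation vector is non-zero.

```python
import numpy as np

mu = np.array([0.0, 0.0, 0.3, -0.1, 0.0, 0.2, 0.0])
print(get_idx_pair(mu))  # [2, 5]
```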
def zeeman_transitions(ju, jl, type):
""" Find possible mu and ml for valid ju and jl for a given transistion
polarization
Parameters:
ju (scalar): Upper level J
jl (scalar): Lower level J
type (string): "Pi", "S+", or "S-" for relevant polarization type
Returns:
tuple: MU, ML arrays for given Js and polarization type
"""
assert np.isscalar(ju) and np.isscalar(jl), "non-scalar J not supported"
assert type.lower() in ["pi", "s+", "s-"], "unknown transition type"
assert ju - jl in [-1, 0, 1], "delta-J should belong to {-1, 0, 1}"
assert ju > 0 and jl >= 0, "only for positive ju and non-negative for jl"
if type.lower() == "pi":
J = min(ju, jl)
return np.arange(-J, J + 1), np.arange(-J, J + 1)
elif type.lower() == "s+":
if ju < jl:
return np.arange(-ju, ju+1), np.arange(-ju+1, ju+2)
elif ju == jl:
return np.arange(-ju, ju), np.arange(-ju+1, ju+1)
else:
return np.arange(-ju, jl), np.arange(-ju+1, jl+1)
elif type.lower() == "s-":
if ju < jl:
return np.arange(-ju, ju+1), np.arange(-jl, ju)
elif ju == jl:
return np.arange(-ju+1, ju+1), np.arange(-ju, ju)
else:
return np.arange(-ju+2, ju+1), np.arange(-ju+1, ju) | 446d9683da6cc027003b2ec755d8828ccb01db5d | 11,790 |
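A short check of `zeeman_transitions` for a J = 2 to J = 1 transition, using the same numpy import the snippet assumes.

```python
import numpy as np

# Pi transitions: m runs over the smaller of the two J values.
mu, ml = zeeman_transitions(2, 1, "Pi")
print(mu, ml)  # [-1  0  1] [-1  0  1]

# Sigma+ transitions pair each upper m with the lower m shifted by one.
mu, ml = zeeman_transitions(2, 1, "S+")
print(mu, ml)  # [-2 -1  0] [-1  0  1]
```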
def get_reachable_nodes(node):
"""
returns a list with all the nodes from the tree with root *node*
"""
ret = []
stack = [node]
while len(stack) > 0:
cur = stack.pop()
ret.append(cur)
for c in cur.get_children():
stack.append(c)
return ret | c9ffaca113a5f85484433f214015bf93eea602d1 | 11,791 |
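A minimal sketch of using `get_reachable_nodes` with a hypothetical node class; any object exposing `get_children()` works.

```python
class Node:
    """Hypothetical tree node providing the get_children() interface assumed above."""
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []

    def get_children(self):
        return self.children


root = Node("root", [Node("a"), Node("mid", [Node("b")])])
print([n.name for n in get_reachable_nodes(root)])  # ['root', 'mid', 'b', 'a']
```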
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams | 1d428cae33a6a34a7844171c72c7821a44fc3e97 | 11,792 |
def torsion_coordinate_names(zma):
""" z-matrix torsional coordinate names
(currently assumes torsional coordinates generated through x2z)
"""
name_dct = standard_names(zma)
inv_name_dct = dict(map(reversed, name_dct.items()))
geo = automol.geom.without_dummy_atoms(geometry(zma))
tors_names = automol.convert.geom.zmatrix_torsion_coordinate_names(geo)
tors_names = tuple(map(inv_name_dct.__getitem__, tors_names))
return tors_names | a7820e1619d4a73260ec4d9255b78cdec2263a55 | 11,793 |
from typing import List
from typing import Dict
def extract_other_creditors_d(
page: pdfplumber.pdf.Page, markers: List[Dict], creditors: List
) -> List:
Crop and extract address, key and acct # from the PDF
:param page: PDF page
:param markers: The top and bottom markers
:return: The updated list of creditors with address, key and acct information
"""
adjust = 0 if len(markers) == 5 else 12
addy_bbox = (
0,
markers[0]["top"],
int(markers[-1]["x1"]) * 0.35,
markers[-1]["top"],
)
key_bbox = (
markers[-3]["x0"],
markers[0]["top"] - adjust,
markers[-3]["x1"],
markers[-3]["top"],
)
acct_bbox = (
markers[1]["x0"],
markers[1]["top"] - 12,
markers[1]["x1"],
markers[1]["top"],
)
address = page.crop(addy_bbox).filter(keys_and_input_text).extract_text()
key = (
page.crop(key_bbox).filter(keys_and_input_text).extract_text().strip()
)
acct = page.crop(acct_bbox).filter(keys_and_input_text).extract_text()
for creditor in creditors:
if creditor["key"] == key:
other_creditors = creditor["other_creditors"]
other_creditors.append(
{"key": key, "address": address, "acct": acct}
)
creditor["other_creditors"] = other_creditors
return creditors | cb66185c68c7ab3febeee611e4384b839b42417e | 11,794 |
from typing import List
from pathlib import Path
def get_dicdirs(mecab_config: str = "mecab-config") -> List[Path]:
"""Get MeCab dictionary directories.
Parameters
----------
mecab_config : str
Executable path of mecab-config, by default "mecab-config".
Returns
-------
List[Path]
MeCab dictionary directories.
"""
dicdirs = []
for path in _mecab_config_dicdir(mecab_config).glob("**/dicrc"):
dicdirs.append(path.parent.resolve())
return dicdirs | 26d7969c072a9aa0668db31c296ee930b567049f | 11,795 |
def new_instance(settings):
"""
MAKE A PYTHON INSTANCE
`settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE
"""
settings = set_default({}, settings)
if not settings["class"]:
Log.error("Expecting 'class' attribute with fully qualified class name")
# IMPORT MODULE FOR HANDLER
path = settings["class"].split(".")
class_name = path[-1]
path = ".".join(path[:-1])
constructor = None
try:
temp = __import__(path, globals(), locals(), [class_name], 0)
constructor = object.__getattribute__(temp, class_name)
except Exception as e:
Log.error("Can not find class {{class}}", {"class": path}, cause=e)
settings['class'] = None
try:
return constructor(kwargs=settings) # MAYBE IT TAKES A KWARGS OBJECT
except Exception as e:
pass
try:
return constructor(**settings)
except Exception as e:
Log.error("Can not create instance of {{name}}", name=".".join(path), cause=e) | bf32bd41105052816a9a54efb71143f2a250502f | 11,796 |
def get_type(k):
"""Takes a dict. Returns undefined if not keyed, otherwise returns the key type."""
try:
v = {
'score': '#text',
'applicant': 'str',
'applicant_sort': 'str',
'author': 'str',
'author_sort': 'str',
'brief': 'bool',
'city': 'str',
'daNumber': 'str',
'dateCommentPeriod': 'date',
'dateReplyComment': 'date',
'dateRcpt': 'date',
'disseminated': 'date',
'exParte': 'bool',
'fileNumber': 'str',
'id': 'long',
'lawfirm': 'str',
'lawfirm_sort': 'str',
'modified': 'date',
'pages': 'int',
'proceeding': 'str',
'reportNumber': 'str',
'regFlexAnalysis': 'bool',
'smallBusinessImpact': 'bool',
'stateCd': 'str',
'submissionType': 'str',
'text': 'str',
'viewingStatus': 'str',
'zip': 'str'
}[k]
except KeyError:
v = False
return v | fec3b7e04531dd202c46366f096f687160c68320 | 11,798 |
def al(p):
"""
Given a quaternion p, return the 4x4 matrix A_L(p)
which when multiplied with a column vector q gives
the quaternion product pq.
Parameters
----------
p : numpy.ndarray
4 elements, represents quaternion
Returns
-------
numpy.ndarray
4x4 matrix describing action of quaternion multiplication
"""
# Given a quaternion p, return the 4x4 matrix A_L(p)
# which when multiplied with a column vector q gives
# the quaternion product pq.
return np.array([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], -p[3], p[2]],
[p[2], p[3], p[0], -p[1]],
[p[3], -p[2], p[1], p[0]]]) | 1e4803bffd75fb841b723d504261c51019d5d45e | 11,799 |
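A quick numerical check of `al`: multiplying A_L(p) by q as a column vector should reproduce the Hamilton product pq, with quaternions stored as [w, x, y, z].

```python
import numpy as np

def hamilton(p, q):
    """Direct Hamilton product of two [w, x, y, z] quaternions, for comparison."""
    w1, x1, y1, z1 = p
    w2, x2, y2, z2 = q
    return np.array([
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2,
    ])

rng = np.random.default_rng(0)
p, q = rng.normal(size=4), rng.normal(size=4)
assert np.allclose(al(p) @ q, hamilton(p, q))
```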
from datetime import datetime
def build_data_table(row, fields_to_try):
"""
Create HTML table for one row of data
If no fields are valid, returns empty string
"""
th_class = 'attribute_heading'
td_class = 'attribute_value'
field_names = pd.read_csv('data/field_names.csv')
output_table = """
<table>
<tbody>
"""
fields_written = 0
for field_name in fields_to_try:
if field_name in row:
field_value = row[field_name]
# Convert timestamp to human-readable string
if isinstance(field_value, datetime):
field_value = field_value.strftime('%B %-d, %Y')
if pd.notna(field_value) and len(field_value) > 0:
# If no display_name has been defined for the field_name, use the field_name as the display_name
if sum(field_names['field_name'] == field_name) == 0:
display_name = field_name
else:
display_name = field_names.loc[field_names['field_name'] == field_name, 'display_name'].values[0]
output_table += f"""
<tr>
<th class="{th_class}">{display_name}</th>
"""
if '_link' in field_name:
output_table += f'<td class="{td_class}"><a href="{field_value}">{field_value}</a></td>'
elif '_email' in field_name:
output_table += f'<td class="{td_class}"><a href="mailto:{field_value}">{field_value}</a></td>'
else:
output_table += f'<td class="{td_class}">{field_value}</td>'
output_table += '</tr>'
fields_written += 1
output_table += """
</tbody>
</table>
"""
# or could use: if any([(f in row.index) for f in fields_to_try]):
if fields_written == 0:
output_table = ''
return output_table | 812a7acbe33296fc30aef4b27e427c63c6fc63bb | 11,800 |
def time_entry_reader(date, configuration):
"""Read the entries and return a list of entries that are apart of the date provided."""
parser = YAML(typ='rt')
date = date.date()
try:
with open(configuration['filename'], 'r') as data_file:
time_entries = parser.load(data_file).get('records', [])
except FileNotFoundError:
LOGGER.error('Cannot read file %s', configuration['filename'])
raise RuntimeError(f'Cannot read file {configuration["filename"]}')
return [te for te in time_entries if te['date'] == date] | 5e01246d3fae1d8eaf53cbf1dec40f488ddfd0d4 | 11,801 |
import asyncio
async def test_send_write(event_loop):
"""Check feed-receive scenarios used in the library."""
STREAM_ID = 'whatever'
DATA = b'data'
def make_writer():
queue = asyncio.Queue()
async def writer(id, data):
assert id == STREAM_ID
await queue.put(data)
return writer, queue
for stream_mode in [StreamMode.WRITE, StreamMode.READ | StreamMode.WRITE]:
stream = Stream(STREAM_ID, event_loop)
writer, queue = make_writer()
stream.on_write.append(writer)
with pytest.raises(exceptions.InvalidStreamMode):
await stream.send(None)
stream.open(stream_mode)
assert stream.is_writable
await stream.send(DATA)
assert await queue.get() == DATA
with pytest.raises(TypeError):
await stream.send(None)
stream.close_sync()
with pytest.raises(exceptions.InvalidStreamState):
await stream.send(None) | 669d25646aecd891547c3cbc40f7215e1c32c08b | 11,802 |
def width_series(value_series, outer_average_width=5, max_value=None, method='linear'):
"""
:param value_series: the pd.Series that contain the values
:param outer_average_width: the average width of the width series to return
:param max_value: value to use as the maximum when normalizing the series (to focus low values)
:param method: linear or surface
:return: width_series: pd.Series that contains the widths corresponding to the values
:rtype: pd.Series
"""
max_value = max_value if max_value else np.max(list(value_series.values))
if method == 'linear':
serie = value_series.apply(lambda x: x / max_value * outer_average_width)
elif method == 'surface':
serie = value_series.apply(lambda x: np.sqrt(x / max_value) * outer_average_width)
return serie | 0efefbe0d1e7024293e0f6a8a39b7fca2f5cf41b | 11,803 |
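A small usage sketch for `width_series` with pandas (numpy is assumed to be imported as `np`, as in the snippet).

```python
import numpy as np
import pandas as pd

flows = pd.Series({'link_a': 100.0, 'link_b': 25.0, 'link_c': 4.0})

# Linear scaling: the largest value gets the full outer width.
print(width_series(flows, outer_average_width=5))
# link_a    5.00
# link_b    1.25
# link_c    0.20

# 'surface' scaling compresses the range, keeping small values visible.
print(width_series(flows, outer_average_width=5, method='surface'))
```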
def unroll_upper_triangular(matrix):
"""Converts square matrix to vector by unrolling upper triangle."""
rows, cols = matrix.shape
assert rows == cols, "Not a square matrix."
row_idx, col_idx = np.triu_indices(rows, 1)
unrolled = []
for i, j in zip(row_idx, col_idx):
unrolled.append(matrix[i][j])
assert len(unrolled) == rows * (rows - 1) // 2
return unrolled | b62725a178d569e2812ad48c826b8a7a864c04b6 | 11,804 |
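A small example of `unroll_upper_triangular` on a symmetric 3x3 matrix; only the strictly upper triangle is kept, row by row.

```python
import numpy as np

m = np.array([[1.0, 0.2, 0.3],
              [0.2, 1.0, 0.4],
              [0.3, 0.4, 1.0]])

print(unroll_upper_triangular(m))  # [0.2, 0.3, 0.4]
```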
from typing import Sequence
from typing import Any
from typing import Optional
from typing import Tuple
def fill_tuples(
tuples: Sequence[Any],
length: Optional[int] = None,
repeat: bool = False,
fill_method: str = 'bfill',
) -> Sequence[Tuple]:
"""Fill tuples so they are all the same length.
Parameters
----------
length : int, optional
Fill tuples to a fixed length. If None, fills to max length of
the non-string sequence objects given by tuples.
repeat : bool, default False
If True then fills missing tuple values by repeating the boundary
value of the tuple. If False fills with None.
fill_method : {'bfill', 'ffill'}, str
Whether to forward fill or backfill the tuple values.
"""
if not length:
if not any(is_non_string_sequence(t) for t in tuples):
return tuples
length = max(len(t) for t in tuples if is_non_string_sequence(t))
new_tups = []
for tup in tuples:
tup = tuple_convert(tup)
while len(tup) < length:
if fill_method == 'bfill':
tup = (tup[0] if repeat else None,) + tup
else: # 'ffill'
tup += (tup[-1] if repeat else None,)
new_tups.append(tup)
return new_tups | 80766f17b78a3fba0dc49ef95131564ce7b1e563 | 11,805 |
def intersect(connection, items, ttl=30, execute=True):
"""并集计算"""
return _set_common(connection, 'sinterstore', items, ttl, execute) | 154480043f2b7634913839ea6ed1425ecc8cc312 | 11,806 |
def batch_decode(raw_logits, use_random, decode_times):
"""
Split raw_logits into up to eight chunks along axis 1, decode each chunk with decode_chunk, and return the flattened list of results.
"""
size = (raw_logits.shape[1] + 7) // 8
logit_lists = []
for i in range(0, raw_logits.shape[1], size):
if i + size < raw_logits.shape[1]:
logit_lists.append(raw_logits[:, i: i + size, :])
else:
logit_lists.append(raw_logits[:, i:, :])
result_list = [decode_chunk(logit_lists[i], use_random, decode_times) for i in range(len(logit_lists))]
return_list = []
for _0 in result_list:
for _1 in _0:
return_list.append(_1)
return return_list | fcde630681d4455e717b7b3b19b098b72fb8a64c | 11,807 |
import pathlib
import pkg_resources
import yaml
def _load_schemata(obj_type: str) -> dict:
"""Load the schemata from the package, returning merged results of
other schema files if referenced in the file loaded.
:raises: FileNotFoundError
"""
schema_path = pathlib.Path(pkg_resources.resource_filename(
'pglifecycle', 'schemata/{}.yml'.format(obj_type).replace(' ', '_')))
if not schema_path.exists():
raise FileNotFoundError(
'Schema file not found for object type {!r}'.format(obj_type))
return _preprocess(yaml.safe_load(schema_path.read_text())) | a737420b85bd78cf2210c8d12794eaaa4eb4ee90 | 11,808 |
def waitfor(msg, status = '', spinner = None, log_level = log_levels.INFO):
"""waitfor(msg, status = '', spinner = None) -> waiter
Starts a new progress indicator which includes a spinner
if :data:`pwnlib.term.term_mode` is enabled. By default it
outputs to loglevel :data:`pwnlib.log_levels.INFO`.
Args:
msg (str): The message of the spinner.
status (str): The initial status of the spinner.
spinner (list): This should either be a list of strings or None.
If a list is supplied, then a either element of the list
is shown in order, with an update occuring every 0.1 second.
Otherwise a random spinner is chosen.
log_level(int): The log level to output the text to.
Returns:
A waiter-object that can be updated using :func:`status`, :func:`done_success` or :func:`done_failure`.
"""
if context.log_level > log_level:
h = _DummyWaiter()
elif term.term_mode:
h = _TermWaiter(msg, spinner, log_level)
else:
h = _SimpleWaiter(msg, spinner, log_level)
if status:
h.status(status)
_waiter_stack.append(h)
return h | 6dc229cff86ecdbdccbda71239eafdc878c4520e | 11,809 |
def f(i):
"""Add 2 to a value
Args:
i ([int]): integer value
Returns:
[int]: integer value
"""
return i + 2 | 72b5d99f3b2132054805ab56872cf2199b425b20 | 11,810 |
def show_forecast(cmp_df, num_predictions, num_values, title):
"""Visualize the forecast."""
def create_go(name, column, num, **kwargs):
points = cmp_df.tail(num)
args = dict(name=name, x=points.index, y=points[column], mode='lines')
args.update(kwargs)
return go.Scatter(**args)
lower_bound = create_go('Lower Bound', 'yhat_lower', num_predictions,
line=dict(width=0),
marker=dict(color="red"))
upper_bound = create_go('Upper Bound', 'yhat_upper', num_predictions,
line=dict(width=0),
marker=dict(color="red"),
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty')
forecast = create_go('Forecast', 'yhat', num_predictions,
line=dict(color='rgb(31, 119, 180)'))
actual = create_go('Actual', 'y', num_values,
marker=dict(color="red"))
# In this case the order of the series is important because of the filling
data = [lower_bound, upper_bound, forecast, actual]
layout = go.Layout(yaxis=dict(title='Posts'), title=title, showlegend = False)
fig = go.Figure(data=data, layout=layout)
plot(fig, show_link=False) | 669300c4c57890d76153fe1a419037eada2fcbe6 | 11,811 |
def get_concepts_from_kmeans(tfidf, kmeans):
"""Get kmeans cluster centers in term space.
Parameters
----------
tfidf : TfidfVectorizer
Fitted vectorizer with learned term vocabulary.
kmeans : KMeans
KMeans fitted to document-term matrix returned by tfidf.
Returns
-------
pandas.DataFrame
Columns are terms, rows are "concepts" sorted by cluster size.
"""
df = pd.DataFrame(kmeans.cluster_centers_, columns=tfidf.get_feature_names())
return df.reindex(pd.Series(kmeans.labels_).value_counts().index) | 69734b194c5d71c8e93347845f83264c832820d6 | 11,813 |
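A short end-to-end sketch with scikit-learn; it assumes a version where `TfidfVectorizer.get_feature_names()` still exists (newer releases renamed it to `get_feature_names_out()`), since that is what the function above calls.

```python
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

docs = [
    "cats purr and nap",
    "dogs bark and fetch",
    "cats and dogs are pets",
    "dogs fetch sticks",
]

tfidf = TfidfVectorizer(stop_words="english")
doc_term = tfidf.fit_transform(docs)
kmeans = KMeans(n_clusters=2, n_init=10, random_state=0).fit(doc_term)

# Rows are cluster centres ("concepts") ordered by cluster size,
# columns are the learned vocabulary terms.
print(get_concepts_from_kmeans(tfidf, kmeans).round(2))
```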
def streams_to_dataframe(streams, imcs=None, imts=None, event=None):
"""Extract peak ground motions from list of processed StationStream objects.
Note: The PGM columns underneath each channel will be variable
depending on the units of the Stream being passed in (velocity
sensors can only generate PGV) and on the imtlist passed in by
user. Spectral acceleration columns will be formatted as SA(0.3)
for 0.3 second spectral acceleration, for example.
Args:
streams (list):
List of processed StationStream objects.
imcs (list):
Strings designating desired components to create in table.
imts (list):
Strings designating desired PGMs to create in table.
event (ScalarEvent): Defines the focal time,
geographic location, and magnitude of an earthquake hypocenter.
Default is None.
Returns:
DataFrame: Pandas dataframe containing columns:
- STATION Station code.
- NAME Text description of station.
- LOCATION Two character location code.
- SOURCE Long form string containing source network.
- NETWORK Short network code.
- LAT Station latitude
- LON Station longitude
- DISTANCE Epicentral distance (km) (if epicentral
lat/lon provided)
- HN1 East-west channel (or H1) (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
- HN2 North-south channel (or H2) (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
- HNZ Vertical channel (or HZ) (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
- GREATER_OF_TWO_HORIZONTALS (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
"""
if imcs is None:
station_summary_imcs = DEFAULT_IMCS
else:
station_summary_imcs = imcs
if imts is None:
station_summary_imts = DEFAULT_IMTS
else:
station_summary_imts = imts
subdfs = []
for stream in streams:
if not stream.passed:
continue
if len(stream) < 3:
continue
stream_summary = StationSummary.from_stream(
stream, station_summary_imcs, station_summary_imts, event)
summary = stream_summary.summary
subdfs += [summary]
dataframe = pd.concat(subdfs, axis=0).reset_index(drop=True)
return dataframe | 8be5968ee513da80910df227156e2ceb02624941 | 11,814 |
def timestamp_diff(time_point_unit: TimePointUnit, time_point1, time_point2) -> Expression:
"""
Returns the (signed) number of :class:`~pyflink.table.expression.TimePointUnit` between
time_point1 and time_point2.
For example,
`timestamp_diff(TimePointUnit.DAY, lit("2016-06-15").to_date, lit("2016-06-18").to_date)`
leads to 3.
:param time_point_unit: The unit to compute diff.
:param time_point1: The first point in time.
:param time_point2: The second point in time.
:return: The number of intervals as integer value.
"""
return _ternary_op("timestampDiff", time_point_unit._to_j_time_point_unit(),
time_point1, time_point2) | 711c41adf3472b2dd0ada51160aefca432ed2bc6 | 11,815 |
from tqdm import tqdm
def plot_solar_twins_results(star_postfix=''):
"""Plot results for 17 pairs with q-coefficients for solar twins"""
def format_pair_label(pair_label):
"""Format a pair label for printing with MNRAS ion format.
Parameters
----------
pair_label : str
A pair label of the form "4492.660Fe2_4503.480Mn1_25"
Returns
-------
dict
A dictionary containing LaTeX-formatted representations of the two
transitions in the pair label.
"""
t1, t2, order_num = pair_label.split('_')
# This mimics the look of ion labels in MNRAS.
new_label1 = f"{t1[8:-1]}" + r"\," + r"\textsc{\lowercase{" +\
f"{roman_numerals[t1[-1]]}" + r"}}" + r"\ " + f"{t1[:8]}"
new_label2 = f"{t2[8:-1]}" + r"\," + r"\textsc{\lowercase{" +\
f"{roman_numerals[t2[-1]]}" + r"}}" + r"\ " + f"{t2[:8]}"
return {'ion1': new_label1, 'ion2': new_label2}
roman_numerals = {'1': 'I', '2': 'II'}
# Get labels of the 17 pairs on the shortlist.
pairs_file = vcl.data_dir / '17_pairs.txt'
pair_labels = np.loadtxt(pairs_file, dtype=str)
# Get the 18 solar twins.
stars = {star_name: Star(star_name + star_postfix,
vcl.output_dir / star_name)
for star_name in sp1_stars}
# Set out lists of star for the top and bottom panels.
block1_stars = ('Vesta', 'HD76151', 'HD78429',
'HD140538', 'HD146233', 'HD157347')
block2_stars = ('HD20782', 'HD19467', 'HD45184',
'HD45289', 'HD171665',)
block3_stars = ('HD138573', 'HD183658', 'HD220507', 'HD222582')
block4_stars = ('HD1835', 'HD30495', 'HD78660', )
block1_width = 25
block1_ticks = 15
block2_width = 45
block2_ticks = 30
block3_width = 75
block3_ticks = 50
block4_width = 125
block4_ticks = 75
fig = plt.figure(figsize=(18, 10.5), tight_layout=True)
gs = GridSpec(ncols=20, nrows=4, figure=fig, wspace=0,
height_ratios=(len(block1_stars),
len(block2_stars),
len(block3_stars),
len(block4_stars)))
# Set the "velocity" title to be below the figure.
fig.supxlabel('Difference between pair velocity separation and model (m/s)',
fontsize=18)
# Create a dict to hold all the axes.
axes = {}
# Create top panel (with pair labels)
# Create tick locations to put the grid at.
y_grid_locations = [y+0.5 for y in range(len(block1_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[0, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7,
zorder=1)
# Set the limits of each axis.
ax.set_ylim(top=-0.5, bottom=len(block1_stars)-0.5)
ax.set_xlim(left=-block1_width, right=block1_width)
# Add the grid.
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':', zorder=0)
# Remove all the ticks and labels on the y-axes (left-most will have
# them specially added back in).
ax.tick_params(axis='y', which='both', left=False, right=False,
labelleft=False, labelright=False)
ax.tick_params(axis='x', which='both', top=False, bottom=True,
labeltop=False, labelbottom=True, labelsize=12)
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block1_ticks, 0, block1_ticks)))
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
# This sets the width of the outside edges of the subaxes.
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
# Add the tick labels for each pair at the top of the plot.
ax_twin = ax.twiny()
ax_twin.set_xlim(ax.get_xlim())
ax_twin.tick_params(top=False, labelsize=16)
t1, t2, order_num = label.split('_')
if i > 5:
ax_twin.xaxis.set_major_locator(ticker.FixedLocator((-12,)))
ax_twin.set_xticklabels(('{ion1}\n{ion2}'.format(
**format_pair_label(label)),),
fontdict={'rotation': 90,
'horizontalalignment': 'left',
'verticalalignment': 'bottom'})
elif i in (0, 2, 4):
ax_twin.xaxis.set_major_locator(ticker.FixedLocator((-11, 12)))
ax_twin.set_xticklabels((f'Order: {str(order_num)}',
'{ion1}\n{ion2}'.format(
**format_pair_label(label)),),
fontdict={'rotation': 90,
'horizontalalignment': 'left',
'verticalalignment': 'bottom'})
elif i in (1, 3, 5):
ax_twin.xaxis.set_major_locator(ticker.FixedLocator((2,)))
ax_twin.set_xticklabels((f'Order: {str(order_num)}',),
fontdict={'rotation': 90,
'horizontalalignment': 'left',
'verticalalignment': 'bottom'})
# Add axis to axes dictionary.
axes[(0, i)] = ax
# Create second panel
y_grid_locations = [y+0.5 for y in range(len(block2_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[1, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax.set_ylim(top=-0.5, bottom=len(block2_stars)-0.5)
ax.set_xlim(left=-block2_width, right=block2_width)
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block2_ticks, 0, block2_ticks)))
ax.tick_params(which='both', labelleft=False, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
axes[(1, i)] = ax
# Create third panel
y_grid_locations = [y+0.5 for y in range(len(block3_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[2, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax.set_ylim(top=-0.5, bottom=len(block3_stars)-0.5)
ax.set_xlim(left=-block3_width, right=block3_width)
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block3_ticks, 0, block3_ticks)))
ax.tick_params(which='both', labelleft=False, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
axes[(2, i)] = ax
# Create fourth panel
y_grid_locations = [y+0.5 for y in range(len(block4_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[3, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax.set_ylim(top=-0.5, bottom=len(block4_stars)-0.5)
ax.set_xlim(left=-block4_width, right=block4_width)
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block4_ticks, 0, block4_ticks)))
ax.tick_params(which='both', labelleft=False, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
axes[(3, i)] = ax
# Set the left-most axes to have y-labels for star names.
for i in range(4):
axes[(i, 0)].tick_params(labelleft=True)
# Create the locations for minor ticks to put the star name labels at.
for i, block in enumerate((block1_stars, block2_stars,
block3_stars, block4_stars)):
y_ticks = [y for y in range(len(block))]
axes[(i, 0)].yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
# Create the list of top stars...have to handle Vesta specially.
top_labels = ['Sun']
top_labels.extend([' '.join((x[:2], x[2:])) for x in block1_stars[1:]])
axes[(0, 0)].set_yticklabels(top_labels,
fontdict={'horizontalalignment': 'right',
'fontsize': 15})
for i, star_names in enumerate((block2_stars, block3_stars, block4_stars)):
axes[(i+1, 0)].set_yticklabels([' '.join((x[:2], x[2:]))
for x in star_names],
fontdict={
'horizontalalignment': 'right',
'fontsize': 15})
# Define colors for pre- and post- eras.
pre_color = cmr.ember(0.7)
post_color = cmr.cosmic(0.55)
# How significant to report outliers.
sigma_significance = 3
vprint(f'Looking for outliers beyond {sigma_significance} sigma')
# Create lists to hold the significance values:
pre_stat, pre_sys = [], []
post_stat, post_sys = [], []
for i, pair_label in enumerate(pair_labels):
# Create lists to hold the values and errors:
pre_values, post_values = [], []
pre_err_stat, post_err_stat = [], []
pre_err_sys, post_err_sys = [], []
# Figure out some numbers for locating things from star name.
for star_name in sp1_stars:
if star_name in block1_stars:
row = 0
j = block1_stars.index(star_name)
elif star_name in block2_stars:
row = 1
j = block2_stars.index(star_name)
elif star_name in block3_stars:
row = 2
j = block3_stars.index(star_name)
elif star_name in block4_stars:
row = 3
j = block4_stars.index(star_name)
else:
raise RuntimeError(f"{star_name} not in any list!")
star = stars[star_name]
pair_index = star.p_index(pair_label)
fiber_split_index = star.fiberSplitIndex
# Get the pre-change values.
if star.hasObsPre:
values, mask = remove_nans(star.pairModelOffsetsArray[
:fiber_split_index, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[:fiber_split_index,
pair_index][mask]
plot = True
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
# This indicates no value for a particular 'cell', so just
# plot something there to indicate that.
axes[(row, i)].plot(0, j-0.15, color='Black', marker='x',
markersize=7, zorder=10)
plot = False
if plot:
# Compute error with sigma_** included.
sigma_s2s = star.pairSysErrorsArray[0, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
sig_stat = float((value / error).value)
sig_sys = float((value / full_error).value)
pre_stat.append(sig_stat)
pre_sys.append(sig_sys)
if abs(sig_sys) > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Pre) {sig_sys:.2f}')
pre_values.append(value)
pre_err_stat.append(error)
pre_err_sys.append(full_error)
if (star.name == 'HD1835') and\
(pair_label == '4759.449Ti1_4760.600Ti1_32'):
vprint('For HD 1835, 4759.449Ti1_4760.600Ti1_32:')
vprint(f'Value: {value:.3f}, error: {full_error:.3f}')
# First plot an errorbar with sigma_** included.
axes[(row, i)].errorbar(value, j-0.15,
xerr=full_error,
ecolor=pre_color,
marker='',
capsize=3,
capthick=1.5,
elinewidth=1.4,
zorder=11)
# Then plot just the star's statistical error.
axes[(row, i)].errorbar(value, j-0.15,
xerr=error,
markerfacecolor=pre_color,
markeredgecolor='Black',
ecolor=pre_color,
markeredgewidth=2,
marker='o',
markersize=9,
capsize=5,
elinewidth=4,
zorder=12)
# Get the post-change values.
if star.hasObsPost:
values, mask = remove_nans(star.pairModelOffsetsArray[
fiber_split_index:, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[fiber_split_index:,
pair_index][mask]
plot = True
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
axes[(row, i)].plot(0, j+0.15, color='Black', marker='x',
markersize=7)
plot = False
if plot:
sigma_s2s = star.pairSysErrorsArray[1, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
sig_stat = float((value / error).value)
sig_sys = float((value / full_error).value)
post_stat.append(sig_stat)
post_sys.append(sig_sys)
if abs(sig_sys) > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Post) {sig_sys:.2f}')
post_values.append(value)
post_err_stat.append(error)
post_err_sys.append(full_error)
axes[(row, i)].errorbar(value, j+0.15,
xerr=full_error,
ecolor=post_color,
marker='',
capsize=4,
capthick=1.5,
elinewidth=1.5,
zorder=13)
axes[(row, i)].errorbar(value, j+0.15,
xerr=error,
markerfacecolor=post_color,
markeredgecolor='Black',
ecolor=post_color,
markeredgewidth=2,
marker='D',
markersize=8.5,
capsize=5,
elinewidth=4,
zorder=14)
# Print some metrics for the pair.
pre_val_arr = np.array(pre_values)
pre_err_arr_stat = np.array(pre_err_stat)
pre_err_arr_sys = np.array(pre_err_sys)
post_val_arr = np.array(post_values)
post_err_arr_stat = np.array(post_err_stat)
post_err_arr_sys = np.array(post_err_sys)
wm_value_pre, error_pre = weighted_mean_and_error(
pre_val_arr, pre_err_arr_sys)
wm_value_post, error_post = weighted_mean_and_error(
post_val_arr, post_err_arr_sys)
chi_2_pre_stat = fit.calc_chi_squared_nu(
pre_val_arr, pre_err_arr_stat, 1)
chi_2_pre_sys = fit.calc_chi_squared_nu(
pre_val_arr, pre_err_arr_sys, 1)
chi_2_post_stat = fit.calc_chi_squared_nu(
post_val_arr, post_err_arr_stat, 1)
chi_2_post_sys = fit.calc_chi_squared_nu(
post_val_arr, post_err_arr_sys, 1)
vprint(f'For {pair_label}:')
vprint(' Pre : Weighted mean:'
f' {wm_value_pre:.2f} ± {error_pre:.2f} m/s')
vprint(f' Pre : chi^2: {chi_2_pre_stat:.2f}, {chi_2_pre_sys:.2f}')
vprint(f' Pre : mean error: {np.mean(pre_err_arr_sys):.2f} m/s')
vprint(' Post: Weighted mean:'
f' {wm_value_post:.2f} ± {error_post:.2f} m/s')
vprint(f' Post: chi^2: {chi_2_post_stat:.2f}, {chi_2_post_sys:.2f}')
vprint(f' Post: mean error: {np.mean(post_err_arr_sys):.2f} m/s')
# Create the histogram plots for the pair.
fig_hist = plt.figure(figsize=(5.5, 5.5), tight_layout=True)
bins = np.linspace(-3, 3, num=25)
ax_hist = fig_hist.add_subplot(1, 1, 1)
ax_hist.set_xlabel(r'Significance ($\sigma$)')
ax_hist.set_ylabel('N')
ax_hist.xaxis.set_major_locator(ticker.FixedLocator((-3, -2, -1,
0, 1, 2, 3)))
ax_hist.xaxis.set_minor_locator(ticker.FixedLocator(bins))
ax_hist.yaxis.set_minor_locator(ticker.AutoMinorLocator())
# Add the pre and post distributions together here.
pre_stat.extend(post_stat)
pre_sys.extend(post_sys)
one_sigma, two_sigma = 0, 0
for x in pre_sys:
y = abs(x)
if y < 1:
one_sigma += 1
two_sigma += 1
elif y < 2:
two_sigma += 1
vprint(f'{one_sigma/len(pre_sys):.1%} of values within 1 sigma.')
vprint(f'{two_sigma/len(pre_sys):.1%} of values within 2 sigma.')
ax_hist.hist(pre_stat, color='Gray', histtype='step',
bins=bins, linewidth=1.8, label='Stat. only')
ax_hist.hist(pre_sys, color='Black', histtype='step',
bins=bins, linewidth=2.6, label='Stat. + Sys.')
ax_hist.legend(loc='upper right', fontsize=16,
shadow=True)
outfile = plots_dir / f'Pair_offsets_17_pairs{star_postfix}.pdf'
fig.savefig(str(outfile), bbox_inches='tight', pad_inches=0.01)
histfile = plots_dir / f'Pair_offsets_histograms{star_postfix}.pdf'
fig_hist.savefig(str(histfile), bbox_inches='tight', pad_inches=0.01)
# Create an excerpt of a single column.
fig_ex = plt.figure(figsize=(5, 6), tight_layout=True)
ax_ex = fig_ex.add_subplot(1, 1, 1)
y_grid_locations = [y+0.5 for y in range(len(sp1_stars))]
ax_ex.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax_ex.set_ylim(top=-0.5, bottom=len(sp1_stars)-0.5)
ax_ex.set_xlim(left=-40, right=40)
ax_ex.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax_ex.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax_ex.xaxis.set_minor_locator(ticker.AutoMinorLocator())
# ax_ex.xaxis.set_major_locator(ticker.FixedLocator(
# [-50, -25, 0, 25, 50]))
ax_ex.tick_params(which='both', labelleft=True, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax_ex.spines[axis].set_linewidth(2.1)
ax_ex.spines[axis].set_zorder(20)
ax_ex.set_xlabel('Pair model offset (m/s)', size=15)
# Add labels to axis.
# Create the locations for major ticks to put the star name labels at.
y_ticks = [y for y in range(len(sp1_stars))]
ax_ex.yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
# Create the list of top stars...have to handle Vesta specially.
ex_labels = ['Sun']
ex_labels.extend([' '.join((x[:2], x[2:])) for x in sp1_stars[1:]])
ax_ex.set_yticklabels(ex_labels,
fontdict={'horizontalalignment': 'right',
'fontsize': 15})
# Set the pair label to use.
pair_label = pair_labels[10] # 6138--6139
pair_label = pair_labels[16]
tqdm.write(f'Using pair {pair_label} for excerpt')
for j, star_name in enumerate(sp1_stars):
star = stars[star_name]
pair_index = star.p_index(pair_label)
fiber_split_index = star.fiberSplitIndex
# Get the pre-change values.
if star.hasObsPre:
values, mask = remove_nans(star.pairModelOffsetsArray[
:fiber_split_index, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[:fiber_split_index,
pair_index][mask]
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
# This indicates no value for a particular 'cell', so just
# plot something there to indicate that.
ax_ex.plot(0, j, color='Black', marker='x',
markersize=7, zorder=10)
continue
# Compute error with sigma_** included.
sigma_s2s = star.pairSysErrorsArray[0, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
significance = abs(value / full_error).value
if significance > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Pre) {significance:.2f}')
# First plot an errorbar with sigma_** included.
ax_ex.errorbar(value, j-0.15,
xerr=full_error,
ecolor=pre_color,
marker='',
capsize=3,
capthick=1.5,
elinewidth=1.4,
zorder=11)
# Then plot just the star's statistical error.
ax_ex.errorbar(value, j-0.15,
xerr=error,
markerfacecolor=pre_color,
markeredgecolor='Black',
ecolor=pre_color,
markeredgewidth=2, # controls capthick
marker='o',
markersize=9,
capsize=5,
elinewidth=4,
zorder=12)
# Get the post-change values.
if star.hasObsPost:
values, mask = remove_nans(star.pairModelOffsetsArray[
fiber_split_index:, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[fiber_split_index:,
pair_index][mask]
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
ax_ex.plot(0, j, color='Black', marker='x',
markersize=7)
continue
sigma_s2s = star.pairSysErrorsArray[1, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
significance = abs(value / full_error).value
if significance > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Post) {significance:.2f}')
ax_ex.errorbar(value, j+0.15,
xerr=full_error,
ecolor=post_color,
marker='',
capsize=4,
capthick=1.5,
elinewidth=1.5,
zorder=13)
ax_ex.errorbar(value, j+0.15,
xerr=error,
markerfacecolor=post_color,
markeredgecolor='Black',
ecolor=post_color,
markeredgewidth=2,
marker='D',
markersize=8.5,
capsize=5,
elinewidth=4,
zorder=14)
outfile = plots_dir /\
f'Pair_offsets_17_pairs_excerpt_{pair_label.replace(".", "_")}.pdf'
fig_ex.savefig(str(outfile), bbox_inches='tight', pad_inches=0.01) | c4bd9120891d435dd631394b7b67255bd75fc8d2 | 11,816 |
def equalize(pil_img: Image.Image, level: float):
"""Equalize an image.
.. seealso:: :func:`PIL.ImageOps.equalize`.
Args:
pil_img (Image.Image): The image.
level (float): The intensity.
"""
del level # unused
return ImageOps.equalize(pil_img) | f0771453063b803824571056924397e1f7bb77a3 | 11,817 |
def lightfm_trainer(
train: np.ndarray, loss: str, n_components: int, lam: float
) -> LightFM:
"""Train lightfm models."""
# detect and init the TPU
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
# instantiate a distribution strategy
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
# instantiating the model in the strategy scope creates the model on the TPU
with tpu_strategy.scope():
# train model normally
model = LightFM(
loss=loss,
user_alpha=lam,
item_alpha=lam,
no_components=n_components,
learning_rate=0.001,
random_state=12345,
)
dataset = Dataset()
dataset.fit(train[:, 0], train[:, 1])
(interactions, weights) = dataset.build_interactions(
((x[0], x[1], 1) for x in train[train[:, 2] == 1])
)
model.fit(interactions, epochs=100)
return model | 6776b2fbcb3039cc1efb95cec0b7562b959df0fd | 11,818 |
def get_wspd_ts(path, storm, res, shpmask):
"""
Extracts the U and V component and returns the wind speed timeseries of storm_dict
Arguments:
path (str): Path containing data to load
storm (str): Name of storm
res (str): Resolution of data
Returns:
Pandas dataframe with time index
"""
ufile = f'{path}/ua.T1Hpoint.UMRA2T.*.{storm}.{res}km.nc'
vfile = f'{path}/va.T1Hpoint.UMRA2T.*.{storm}.{res}km.nc'
ucube = iris.load_cube(ufile, 'x_wind')
vcube = iris.load_cube(vfile, 'y_wind')
ucube = ucube.intersection(longitude=(75, 100), latitude=(10, 25))
vcube = vcube.intersection(longitude=(75, 100), latitude=(10, 25))
ws_ifunc = iris.analysis.maths.IFunc(calc_wspd, ws_units_func)
ws_cube = ws_ifunc(ucube, vcube, new_name='wind speed')
try:
mwspd = shpmask.mask_cube(ws_cube)
except:
print("Can't mask with shape! Masked over lon-lat box instead...")
mwspd = ws_cube
cubedata = []
timedata = []
for subcube in mwspd.slices_over('forecast_reference_time'):
# extracting the time
tcoord = subcube.coord('time')
units = tcoord.units
tdata = [units.num2date(point) for point in tcoord.points]
cube = subcube.collapsed(['latitude', 'longitude'], iris.analysis.MAX)
cubedata.append(cube.data.filled())
timedata.append(tdata)
# Convert to Pandas Dataframe with unified time index
s = list()
[s.append(pd.Series(data=cubedata[i], index=timedata[i])) for i in range(np.shape(timedata)[0])]
return pd.DataFrame(s).T | 414d39105a14a5ac2d335a3146a4e6d462f9760a | 11,819 |
import numpy as np
def _read_output_file(path):
"""Read Stan csv file to ndarray."""
comments = []
data = []
columns = None
with open(path, "rb") as f_obj:
# read header
for line in f_obj:
if line.startswith(b"#"):
comments.append(line.strip().decode("utf-8"))
continue
columns = {key: idx for idx, key in enumerate(line.strip().decode("utf-8").split(","))}
break
# read data
for line in f_obj:
line = line.strip()
if line.startswith(b"#"):
comments.append(line.decode("utf-8"))
continue
if line:
data.append(np.array(line.split(b","), dtype=np.float64))
return columns, np.array(data, dtype=np.float64), comments | 62b312db851386900cae1643d3eb75896f45cde1 | 11,820 |
def _transform_data(raw_df, cols_config):
"""
Applies required transformations to the raw dataframe
:returns : Trasformed dataframe ready to be exported/loaded
"""
    # Perform column and dtype checks
    if not check_columns(raw_df, cols_config):
        logger.warning("Inconsistencies found during column check")
    df = raw_df
# Apply transformations
df = convert_dates(df)
df = get_duration(df)
df = remove_negatives(df)
df = drop_columns(df)
return df | 3874ce8bc38d0b75f037538919b1c649d8a6b8b9 | 11,821 |
import math
def mean_and_std(values):
"""Compute mean standard deviation"""
size = len(values)
mean = sum(values)/size
s = 0.0
for v in values:
s += (v - mean)**2
std = math.sqrt((1.0/(size-1)) * s)
return mean, std | 15b11e89317cc86b68262fa959b9c65a2f87bdcc | 11,823 |
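# Usage sketch (illustrative values): sample standard deviation of a short list.
demo_mean, demo_std = mean_and_std([1.0, 2.0, 3.0])
assert (demo_mean, demo_std) == (2.0, 1.0)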
from typing import Union
from typing import List
def blacken(
color: Color, amount: FloatOrFloatIterable
) -> Union[Color, List[Color]]:
"""
Return a color or colors amount fraction or fractions of the way from
`color` to `black`.
:param color: The existing color.
:param amount: The proportion to blacken by.
"""
return cross_fade(from_color=color, to_color='black',
amount=amount) | 26c74556b8d73692ec4afbb763221c508c6a941b | 11,824 |
from sympy import symbols, derive_by_array
def llf_gradient_sigma_neq_gamma(history, sum_less_equal=True):
"""
Calculate the gradient of the log-likelihood function symbolically.
Parameters
----------
sum_less_equal : bool, default: True
        This arg is passed to :func:`llf_sigma_neq_gamma`.
Returns
-------
gradient : sympy.Array
An array containing four entries. The first (second) [third]
{fourth} entry is the derivative of the log-likelihood function
w.r.t. beta (sigma) [gamma] {N} parameter.
"""
beta, sigma, gamma, n = symbols("beta sigma gamma n")
return derive_by_array(
llf_sigma_neq_gamma(history, sum_less_equal),
[beta, sigma, gamma, n]
) | b3efce2413b5f88e4c7b76117f4f668a5f386b30 | 11,825 |
import numpy as np
def selection_support_df(df, combinations, min_support):
"""
selection combinations with support
Parameters
----------
df : pandas.DataFrame
data to be selected.
for example :
= | banana | mango | apple |
| 1 | 1 | 1 |
| 1 | 0 | 0 |
| 1 | 1 | 0 |
combinations : list
combinations of df columns.
for example :
= [("apple", "apple"), ("banana", "apple"), ("mango", "apple")
("apple", "banana", "apple"), ("apple", "mango", "apple"),
("banana", "mango", "apple"), ("apple",), ...]
min_support : float
minimal support to be select combinations
for example :
= 0.5
Returns
-------
combinations and supports.
for example :
= [("banana", "mango", "apple"), ...]
= [0.1, ...]
"""
selected_supports = []
selected_combinations = []
columns = df.columns
n_rows = df.shape[0]
for combination in combinations:
position = position_itemset(combination, columns)
position_columns = np.array(columns[position])
length_combination = len(combination)
combination_array = np.array(df.loc[:, position_columns])
check_array = np.where(length_combination == combination_array.sum(axis=1))[0]
length_check_array = len(check_array)
support = cal_support(length_check_array, n_rows)
if support >= min_support:
selected_combinations.append(combination)
selected_supports.append(support)
return selected_combinations, selected_supports | 4decb66dfe913a62e0b3b67d9a61a6941ec6ff76 | 11,826 |
import pytest
from trie import TrieTree
@pytest.fixture
def empty_trie_tree():
"""Empty trie tree fixture."""
return TrieTree() | d68ae38a810e02015b3967eb44bb3dda8445afd7 | 11,827 |
def bind_context_to_node(context, node):
"""Give a context a boundnode
to retrieve the correct function name or attribute value
with from further inference.
Do not use an existing context since the boundnode could then
be incorrectly propagated higher up in the call stack.
:param context: Context to use
:type context: Optional(context)
:param node: Node to do name lookups from
:type node NodeNG:
:returns: A new context
:rtype: InferenceContext
"""
context = copy_context(context)
context.boundnode = node
return context | 92ce7a9d155e621e54ad90f5aefb49bda4ea60df | 11,828 |
def get_weight_matrix(file_handle):
"""
Read each line in file_handle and return the weight matrix as a dict,
in which each key is the original node name, and each value is a nested
dict, whose keys are gene systematic names, and values are weights.
"""
weight_matrix = dict()
for line_num, line in enumerate(file_handle, start=1):
tokens = line.strip().split('\t')
# The first line includes node names only
if line_num == 1:
num_columns = len(tokens)
nodes = tokens[1:]
for node_name in nodes:
weight_matrix[node_name] = dict()
else: # read data lines
# Validate the number of columns in each line
if num_columns != len(tokens):
raise Exception(f"Incorrect number of columns on line {line_num}")
gene_name = tokens[0]
weights = [float(x) for x in tokens[1:]]
for idx, w in enumerate(weights):
node_name = nodes[idx]
weight_matrix[node_name][gene_name] = w
return weight_matrix | 08773c5ff852814855e4a042bb79acc82d09b067 | 11,829 |
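# Usage sketch (illustrative data): parse a tiny tab-separated matrix from an in-memory handle;
# the header row lists node names and each data row is a gene followed by its weights.
import io
demo_matrix = "gene\tnode1\tnode2\ngeneA\t0.5\t-1.0\ngeneB\t2.0\t0.0\n"
demo_weights = get_weight_matrix(io.StringIO(demo_matrix))
assert demo_weights["node1"]["geneA"] == 0.5
assert demo_weights["node2"]["geneB"] == 0.0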
from datetime import timedelta
def get_uptime():
"""
Get uptime
"""
try:
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime_time = str(timedelta(seconds=uptime_seconds))
data = uptime_time.split('.', 1)[0]
except Exception as err:
data = str(err)
return data | fc783a24b7239c43b69c44ea30b62465a775761d | 11,830 |
import json
def measure_list_for_upcoming_elections_retrieve_api_view(request): # measureListForUpcomingElectionsRetrieve
"""
Ask for all measures for the elections in google_civic_election_id_list
:param request:
:return:
"""
status = ""
google_civic_election_id_list = request.GET.getlist('google_civic_election_id_list[]')
state_code = request.GET.get('state_code', '')
# We will need all candidates for all upcoming elections so we can search the HTML of
# the possible voter guide for these names
measure_list_light = []
results = retrieve_measure_list_for_all_upcoming_elections(google_civic_election_id_list,
limit_to_this_state_code=state_code)
if results['measure_list_found']:
measure_list_light = results['measure_list_light']
expand_results = add_measure_name_alternatives_to_measure_list_light(measure_list_light)
if expand_results['success']:
measure_list_light = expand_results['measure_list_light']
google_civic_election_id_list = results['google_civic_election_id_list']
status += results['status']
success = results['success']
json_data = {
'status': status,
'success': success,
'google_civic_election_id_list': google_civic_election_id_list,
'measure_list': measure_list_light,
}
return HttpResponse(json.dumps(json_data), content_type='application/json') | 449bcde309c6c224521fcf5f0acc6de427d30f55 | 11,831 |
from fedelemflowlist.globals import flow_list_fields
def get_required_flowlist_fields():
"""
Gets required field names for Flow List.
:return:list of required fields
"""
required_fields = []
for k, v in flow_list_fields.items():
if v[1]['required']:
required_fields.append(k)
return required_fields | c2581cde45e9aad0c09620f98557a777a5d89bdb | 11,833 |
def sparse_table_function(*, index, data) -> callable:
"""
The very simplest Python-ish "sparse matrix", and plenty fast on modern hardware, for the
size of tables this module will probably ever see, is an ordinary Python dictionary from
<row,column> tuples to significant table entries. There are better ways if you program
closer to the bare metal, but this serves the purpose.
This routine unpacks "compressed-sparse-row"-style data into an equivalent Python dictionary,
then returns a means to query said dictionary according to the expected 2-dimensional interface.
"""
hashmap = {}
for row_id, (Cs, Ds) in enumerate(zip(index, data)):
if isinstance(Ds, int): # All non-blank cells this row have the same value:
for column_id in Cs: hashmap[row_id, column_id] = Ds
else:
for column_id, d in zip(Cs, Ds) if Cs else enumerate(Ds):
hashmap[row_id, column_id] = d
return lambda R, C: hashmap.get((R, C)) | a1c3f11f5fd9c2ba4d048a69271db48bc61b26df | 11,834 |
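# Usage sketch (illustrative data): row 0 stores the value 5 in columns 0 and 2 (compressed as a
# single int), row 1 stores 7 in column 1; absent cells read back as None.
demo_lookup = sparse_table_function(index=[[0, 2], [1]], data=[5, [7]])
assert demo_lookup(0, 2) == 5
assert demo_lookup(1, 1) == 7
assert demo_lookup(1, 0) is None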
import asyncio
import asyncpg
async def _get_db_connection() -> asyncpg.Connection:
"""
Initialise database connection.
On failure, retry multiple times. When the DB starts in parallel with the app (with Compose),
it may not yet be ready to take connections.
"""
log.info("Creating DB connection")
n_attempts = 3
for attempt in range(1, n_attempts + 1):
try:
return await asyncpg.connect(connection_string)
except ConnectionError:
log.info(f"Failed to connect to DB (attempt: {attempt}/{n_attempts})")
if attempt >= n_attempts:
raise
await asyncio.sleep(5) | 9f2e83b4b98f0d292b352682bf380ff4921e5fba | 11,835 |
def get_view_content(view):
""" Returns view content as string. """
return utils.execute_in_sublime_main_thread(lambda: view.substr(sublime.Region(0, view.size()))) | 4dd8d4c9dfa891b31251f32ad6813549c0c453b0 | 11,836 |
def signed_byte8(x: IntVar) -> Int8:
"""Implementation for `SBYTE8`."""
return signed_byte_n(x, 8) | d4e16c80336a0259b2acb4faf1ff329d90aa21b2 | 11,837 |
import numpy
import itertools
def join(zma1, zma2, join_key_mat, join_name_mat, join_val_dct):
""" join two z-matrices together
"""
syms1 = symbols(zma1)
syms2 = symbols(zma2)
natms1 = count(zma1)
natms2 = count(zma2)
key_mat1 = numpy.array(key_matrix(zma1))
key_mat2 = numpy.array(key_matrix(zma2, shift=natms1)) # note the shift
name_mat1 = numpy.array(name_matrix(zma1))
name_mat2 = numpy.array(name_matrix(zma2))
val_dct1 = values(zma1)
val_dct2 = values(zma2)
join_natms = min(natms2, 3)
assert len(join_key_mat) == len(join_name_mat) == join_natms
join_key_mat = numpy.array(join_key_mat, dtype=numpy.object_)
join_name_mat = numpy.array(join_name_mat, dtype=numpy.object_)
# make sure we aren't overwriting values -- the constructor should take
# care of the rest of the necessary validation
    assert numpy.all(numpy.equal(join_key_mat, None) ==
                     numpy.equal(join_name_mat, None))
join_idxs = numpy.not_equal(join_key_mat, None)
assert numpy.all(numpy.equal(key_mat2[:3][join_idxs], None))
assert numpy.all(numpy.equal(name_mat2[:3][join_idxs], None))
key_mat2[:3][join_idxs] = join_key_mat[join_idxs]
name_mat2[:3][join_idxs] = join_name_mat[join_idxs]
syms = tuple(itertools.chain(syms1, syms2))
key_mat = tuple(itertools.chain(key_mat1, key_mat2))
name_mat = tuple(itertools.chain(name_mat1, name_mat2))
# Could be made to allow for joins with common zma1 and zma2 names (for
# symmetry constraints). Not sure if we really want that.
val_dct = val_dct1.copy()
assert not set(val_dct.keys()) & set(val_dct2.keys())
assert not set(val_dct.keys()) & set(join_val_dct.keys())
val_dct.update(val_dct2)
val_dct.update(join_val_dct)
return automol.create.zmatrix.from_data(syms, key_mat, name_mat, val_dct) | de55377d436ce50d8c60c97992e940e53a7c9ecc | 11,838 |
import networkx as nx
def multi_to_weighted(G: nx.MultiDiGraph):
"""
Converts a multidigraph into a weighted digraph.
"""
nG = nx.DiGraph(G)
# nG.add_nodes_from(G.nodes)
nG.name = G.name + "_weighted_nomulti"
    # G.edges yields (u, v) pairs by default; keys=True is needed to unpack three values
    edge_weights = {(u, v): 0 for u, v, k in G.edges(keys=True)}
    for u, v, key in G.edges(keys=True):
edge_weights[(u, v)] += 1
# nG.add_edges_from(edge_weights.keys())
nx.set_edge_attributes(nG, edge_weights, "weight")
return nG | 0dd14a02c923c8c238c82399f51701639dc82756 | 11,839 |
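# Usage sketch (illustrative graph): two parallel a->b edges collapse into one edge of weight 2.
demo_G = nx.MultiDiGraph(name="demo")
demo_G.add_edges_from([("a", "b"), ("a", "b"), ("b", "c")])
demo_H = multi_to_weighted(demo_G)
assert demo_H["a"]["b"]["weight"] == 2 and demo_H["b"]["c"]["weight"] == 1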
def RetentionInDaysMatch(days):
"""Test whether the string matches retention in days pattern.
Args:
days: string to match for retention specified in days format.
Returns:
Returns a match object if the string matches the retention in days
pattern. The match object will contain a 'number' group for the duration
in number of days. Otherwise, None is returned.
"""
return _RETENTION_IN_DAYS().match(days) | 0b2cded5d01bcb294df1fab956dbe54c9c5e03ae | 11,840 |
from typing import Tuple
import re
def _extract_result_details(pipx_output: str) -> Tuple[str, str, str]:
""" Extracts name and version from pipx's stdout """
match = re.search(r'installed package(.*),(.*)\n.*\n.*?-(.*)', pipx_output)
if match:
package, python_version, plugin_name = map(str.strip, match.groups())
return plugin_name.replace('.exe', ''), package, python_version
    raise PluginManagementFatalException('Failed to find package information in install log!') | ae7a588bbb60b47aa889a4dcb7421a55b55b8e2f | 11,841
def get_pwr_SXT(sxt_los, plasma, emiss, num_pts=100, labels=labels_full):
"""
"""
pwr_int = {}
for ll in labels:
# Get the appropriate database label
filt = ll.split()[1]
pix_los = sxt_los[ll]
# Get the spatial points along the line of sight
num_pixels = len(pix_los)
ell_pts = np.linspace(-0.5, 0.5, num=num_pts)
xs = np.zeros([num_pixels, num_pts])
ys = np.zeros([num_pixels, num_pts])
for index,los in enumerate(pix_los):
#xs[index,:], ys[index,:] = list(zip(*[los.get_xy(ell) for ell in ell_pts]))
xs[index,:], ys[index,:] = los.get_xy(ell_pts)
# Evaluate the profiles
Te_xs = np.maximum(plasma.Te(xs, ys), 10.0)
ne_xs = np.maximum(plasma.ne(xs, ys), 1e15)
n0_xs = plasma.n0(xs, ys)
pts = list( zip( Te_xs.ravel(), ne_xs.ravel()/1e19, n0_xs.ravel()/1e14 ) )
# Evaluate deuterium using quasi-netrality
nZ_xs = {ion:plasma.nZ[ion](xs,ys) for ion in plasma.impurities}
nZ_xs['D'] = plasma.nD(xs, ys)
# Calculate the emission array
emiss_xs = np.zeros(xs.shape)
emiss_xs = ne_xs*nZ_xs['D']*np.reshape(emiss['D'][filt](pts), xs.shape)
for ion in plasma.impurities:
emiss_xs += ne_xs*nZ_xs[ion]*np.reshape(emiss[ion][filt](pts), xs.shape)
# Integrate with the trapezoidal rule
dl = np.ones([num_pts,1])*(ell_pts[1] - ell_pts[0])
dl[0] *= 0.5
dl[-1] *= 0.5
pwr_int[ll] = np.squeeze(np.dot(emiss_xs, dl))
return pwr_int | ba1a1831f5fd2ee18ce95214b696d37c2e33b456 | 11,842 |
import requests
import json
def get_mstp_port(auth):
"""
Function to get list of mstp port status
:param auth: AOSSAuth class object returned by pyarubaoss.auth
:return list of mstp port status
:rtype dict
"""
url_mstp_port = "http://" + auth.ipaddr + "/rest/"+auth.version+"/mstp/port"
try:
r = requests.get(url_mstp_port, headers=auth.cookie)
mstp_port = json.loads(r.text)['mstp_port_element']
return mstp_port
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " get_mstp_port: An Error has occured" | aa74f2d9c5b04f7744009c2862b0f1bcff57a6dc | 11,843 |
from typing import Union
from typing import Callable
def touch(v: Union[Callable, str], default=None):
"""
Touch a function or an expression `v`, see if it causes exception.
If not, output the result, otherwise, output `default`.
Note:
Use `default = pycamia.functions.identity_function` (or write one yourself)
to return the exception object.
Example:
----------
>>> a = 0
>>> touch(lambda: 1/a, default = 'fail')
fail
"""
if not callable(default):
default = const_function(default)
if isinstance(v, str):
local_vars = get_environ_locals()
local_vars.update(locals())
locals().update(local_vars)
try: return eval(v)
except Exception as e: return default(e)
else:
try: return v()
except Exception as e: return default(e) | 90b5395eb68daadb06b1bb29a52a2ca11f34353d | 11,846 |
def q_mult(q1, q2):
"""Quaternion multiplication"""
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return w, x, y, z | f2623836744b9143c5eeafe1b0d71e3cfdb5d8d4 | 11,847 |
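# Usage sketch (w, x, y, z ordering): the identity quaternion leaves q unchanged and i*i = -1.
assert q_mult((1, 0, 0, 0), (0, 1, 0, 0)) == (0, 1, 0, 0)
assert q_mult((0, 1, 0, 0), (0, 1, 0, 0)) == (-1, 0, 0, 0)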
import math
def round_repeats(repeats, global_params):
"""Calculate module's repeat number of a block based on depth multiplier.
Use depth_coefficient of global_params.
Args:
repeats (int): num_repeat to be calculated.
global_params (namedtuple): Global params of the model.
Returns:
new repeat: New repeat number after calculating.
"""
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
# follow the formula transferred from official TensorFlow implementation
return int(math.ceil(multiplier * repeats)) | 9a26e19663c7ecf4b6f746b1900a9afe46311770 | 11,848 |
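# Usage sketch: GlobalParams here is a stand-in namedtuple for the real global_params object.
from collections import namedtuple
GlobalParams = namedtuple("GlobalParams", ["depth_coefficient"])
assert round_repeats(3, GlobalParams(depth_coefficient=1.2)) == 4   # ceil(3 * 1.2)
assert round_repeats(3, GlobalParams(depth_coefficient=None)) == 3  # no multiplier -> unchanged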
def estimate_label_width(labels):
"""
Given a list of labels, estimate the width in pixels
and return in a format accepted by CSS.
Necessarily an approximation, since the font is unknown
and is usually proportionally spaced.
"""
max_length = max([len(l) for l in labels])
return "{0}px".format(max(60,int(max_length*7.5))) | 1e22ad939973373a669841dd5cc318d6927249ca | 11,849 |
import six
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
def ssh_encrypt_text(ssh_public_key, text):
"""Encrypt text with an ssh public key.
If text is a Unicode string, encode it to UTF-8.
"""
if isinstance(text, six.text_type):
text = text.encode('utf-8')
try:
pub_bytes = ssh_public_key.encode('utf-8')
pub_key = serialization.load_ssh_public_key(
pub_bytes, backends.default_backend())
return pub_key.encrypt(text, padding.PKCS1v15())
except Exception as exc:
raise exception.EncryptionFailure(reason=six.text_type(exc)) | 2a5bfc62e08475dcd7f33ba25cf3fa76c43988a2 | 11,852 |
def naildown_entity(entity_class, entity_dict, entity, state, module, check_missing=None):
""" Ensure that a given entity has a certain state """
changed, changed_entity = False, entity
if state == 'present_with_defaults':
if entity is None:
changed, changed_entity = create_entity(entity_class, entity_dict, module)
elif state == 'present':
if entity is None:
changed, changed_entity = create_entity(entity_class, entity_dict, module)
else:
changed, changed_entity = update_entity(entity, entity_dict, module, check_missing)
elif state == 'copied':
new_entity = entity_class(name=entity_dict['new_name'], organization=entity_dict['organization']).search()
if entity is not None and len(new_entity) == 0:
changed, changed_entity = copy_entity(entity, entity_dict, module)
elif len(new_entity) == 1:
changed_entity = new_entity[0]
elif state == 'absent':
if entity is not None:
changed, changed_entity = delete_entity(entity, module)
else:
module.fail_json(msg='Not a valid state: {}'.format(state))
return changed, changed_entity | 3c5b7e8b026d4ea8444625fa7a01b43567973138 | 11,854 |
def get_all_admins():
"""
Returns a queryset of all active admin users.
"""
current_admins = User.objects.filter(is_admin=True, is_active=True)
return current_admins | befba9efb62d7b1a46c0019776d1327251e9cf9d | 11,855 |
import numpy as np
def htx_numpy(h, x):
""" Convolution of reversed h with each line of u. Numpy implementation.
Parameters
----------
h : array, shape (n_time_hrf), HRF
x : array, shape (n_samples, n_time), neural activity signals
Return
------
h_conv_x : array, shape (n_samples, n_time_valid), convolved signals
"""
n_samples, _ = x.shape
return np.r_[[np.convolve(h[::-1], x[i], mode='valid')
for i in range(n_samples)]] | 306608179eb52f4f70e0f03da75283404201a044 | 11,856 |
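# Usage sketch (illustrative arrays): one signal of ones convolved with the reversed kernel [3, 2, 1];
# 'valid' mode keeps n_time - len(h) + 1 samples per row.
demo_h = np.array([1.0, 2.0, 3.0])
demo_x = np.ones((1, 4))
assert np.allclose(htx_numpy(demo_h, demo_x), [[6.0, 6.0]])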
import torch
def get_mask_results(probs, boxes, im_w, im_h, pixil_score_th=0.25):
"""
Args:
probs (Tensor)
boxes (ImageContainer)
Returns:
rles (list[string])
mask_pixel_scores (Tensor)
"""
device = probs.device
N, _, H, W = probs.shape
num_chunks = N if device.type == "cpu" else int(np.ceil(N * int(im_h * im_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
    assert num_chunks <= N, "Default GPU_MEM_LIMIT is too small; try increasing it"
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
im_masks = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
im_masks_tl = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
im_masks_th = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
for i in chunks:
masks_chunk, spatial_inds = _do_paste_mask(probs[i], boxes[i], im_h, im_w, skip_empty=device.type == "cpu")
im_masks[(i,) + spatial_inds] = (masks_chunk >= 0.5).to(dtype=torch.bool)
im_masks_tl[(i,) + spatial_inds] = (masks_chunk >= pixil_score_th).to(dtype=torch.bool)
im_masks_th[(i,) + spatial_inds] = (masks_chunk >= (1 - pixil_score_th)).to(dtype=torch.bool)
mask_pixel_scores = (torch.sum(im_masks_th, dim=(1, 2)).to(dtype=torch.float32)
/ torch.sum(im_masks_tl, dim=(1, 2)).to(dtype=torch.float32).clamp(min=1e-6))
rles = []
for i in range(N):
# Too slow.
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(np.array(im_masks[i].unsqueeze(2).cpu(), dtype=np.uint8, order='F'))[0]
# For dumping to json, need to decode the byte string.
# https://github.com/cocodataset/cocoapi/issues/70
rle['counts'] = rle['counts'].decode('ascii')
rles.append(rle)
return rles, mask_pixel_scores | a3c9823f8578e63f7a39fe25791c1b0369640f26 | 11,857 |
from typing import Dict
from typing import Any
def openapi() -> Dict[str, Any]:
"""
>>> client = app.test_client()
>>> response = client.get("/openapi.json")
>>> response.get_json()['openapi']
'3.0.0'
>>> response.get_json()['info']['title']
'Chapter 13. Example 2'
"""
# See dominoes_openapi.json for full specification
return jsonify(OPENAPI_SPEC) | 1671022e42c6bd8cc75aa66c3259f5094fb05696 | 11,858 |
def koliko_izdelkov_v_skladiscu():
"""
    Return the number of distinct products in the warehouse.
>>> koliko_izdelkov_v_skladiscu()
18
"""
poizvedba = """
SELECT COUNT(*)
FROM izdelki
WHERE kolicina IS NOT null
"""
st, = conn.execute(poizvedba).fetchone()
return st | bb0143d8a7e4f404c88866331cfbdfd9c89d07f1 | 11,859 |
import math
import random
import numpy as np
def create_spline(curve_data, s_type='NURBS', len_nodes=100, spline_id=0, splines_count=1, bud_position=None):
"""
Create a spline of given type with n nodes to form a path made of sin and cos
"""
spline = curve_data.splines.new(type=s_type)
# Regular spline points need xyz + weight
got_points = 1
co_dimension = 4
pts = spline.points
if s_type == 'BEZIER':
got_points = 2
# Bezier control points accept only xyz
co_dimension = 3
# Left and right handles are not handled here
pts = spline.bezier_points
# This is the len for numpy arrays
len_nodes = len_nodes - got_points + 1
# Every spline already has got point(s) when created
# This was compensated with got_points
pts.add(len_nodes - 1)
if bud_position is None:
bud_position = np.random.rand(co_dimension) * 1000
# Below is a play with random, sin and cos just for demo.
# Replace with your own data and if you have none, it's pretty easy
# to generate a bunch of points in space with Sverchok or Animation Nodes
radii = np.random.rand(len_nodes) + 1
radii *= radii**4 / 10
dir_walk = np.arange(len_nodes) / 10 + np.random.rand(len_nodes)
pi_walk = (np.arange(len_nodes)+1) * int(math.pi / len_nodes * 100)/(100+len_nodes)
pi_walk += random.random()*math.pi
nodes = np.random.rand(len_nodes, co_dimension)
nodes[:, 0] += bud_position[0]
nodes[:, 1] += bud_position[1]
nodes[:, 2] += bud_position[2]
rf1 = int(random.random()*3 + 1)
rf2 = int(random.random()*3 + 1)
nodes[:, 0] += np.sin(np.cos(pi_walk)) * random.random()*300+200
nodes[:, 1] += (np.cos(np.sin(pi_walk)**rf1) + np.sin(pi_walk*rf2)) * random.random()*300+200
nodes[:, 2] += np.sin(pi_walk*rf2) * np.cos(pi_walk*rf1) * random.random()*300+200
nodes [:, 0] += np.random.rand(len_nodes) * (random.random()*20+20)
nodes [:, 1] += np.random.rand(len_nodes) * (random.random()*20+20)
nodes [:, 2] += np.random.rand(len_nodes) * (random.random()*20+20)
#nodes[:, 0] += np.sin(pi_walk*random.random())*(random.random()*10+10)**2
#nodes[:, 1] += np.sin(pi_walk*random.random())*(random.random()*100+100)
#nodes[:, 2] += np.cos(pi_walk*random.random())*(random.random()*100+100)
nodes [:, :] *= (random.random()*2+0.5)
# Dummy data for key and value properties, play with HairInfo.Key and HairInfo.Value in your shader!
keys = np.arange(len_nodes) + np.random.rand(len_nodes)
values = np.random.rand(len_nodes)
pts.foreach_set('co', nodes.ravel())
pts.foreach_set('radius', radii.ravel())
pts.foreach_set('key', keys.ravel())
pts.foreach_set('value', values.ravel())
if s_type == 'BEZIER':
handle_fac = 100
lefts = nodes.copy()
lefts[:, 0] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
lefts[:, 1] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
lefts[:, 2] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
rights = nodes.copy()
rights[:, 0] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
rights[:, 1] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
rights[:, 2] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
pts.foreach_set('handle_left', lefts.ravel())
pts.foreach_set('handle_right', rights.ravel())
spline.use_endpoint_u = True
# Spline resolution defaults to 12 but is too much for this use-case
spline.resolution_u = 3
return spline | cce26da44f9da60638b3e46b89cf87c49ad5c3d6 | 11,860 |
import sqlite3
import time
def get_composed_jumps(jumps, levels, win, verbose=0):
"""
Take the output of get_jumps (from landmarks)
Compose the jumps, return them as an array of array.
If intermediate=True, we return the jumps for intermediary levels,
not just the requested one.
We use a temporary sqlite3 connection to work.
"""
assert len(levels) > 0
maxlevel = max(levels)
assert maxlevel >= 1, 'level 1 min, it means jumps between two landmarks'
# verbose
if verbose>0:
t1 = time.time()
# open temporary connection
# IT IS FAST!
# timeit.Timer("import sqlite3; conn = sqlite3.connect(':memory:'); conn.close()").timeit(10000)
# Out[35]: 0.49553799629211426
conn = sqlite3.connect(':memory:')
# special case: level = 1
if maxlevel == 1:
add_nlmk2_jumps_to_db(conn, jumps, nocopy=True)
q = "SELECT * FROM jumps_level1"
res = conn.execute(q)
composed_jumps = res.fetchall()
conn.close()
if verbose > 0:
            print('Composed jumps (max lvl = %d) obtained in %f seconds.' % (maxlevel, time.time() - t1))
return composed_jumps
# enters level1 jumps
add_nlmk2_jumps_to_db(conn, jumps)
# do upper levels
for lvl in range(2, maxlevel+1):
compose_jumps(conn, win, level=lvl)
# what do we return?
composed_jumps = []
for lvl in levels:
q = "SELECT * FROM jumps_level" + str(lvl)
res = conn.execute(q)
composed_jumps.extend(res.fetchall())
# done
conn.close()
# verbose
if verbose > 0:
        print('Composed jumps (max lvl = %d) obtained in %f seconds.' % (maxlevel, time.time() - t1))
return composed_jumps | 3305d2efed23eed269b3483a9619e50ad39826de | 11,862 |
import itertools
import numpy as np
def calculate_agreement_stv(agreement_dictionary, turker_accuracies):
"""
Inter agreement with most accurate chair vote
Args:
agreement_dictionary: holding sentence annotation records - 9 from non-experts and 1 expert
sentence -> list of annotations (size settings.RESPONSE_COUNT + 1)
turker_accuracies: accuracy for each turker used for the chair vote
Returns:
The accuracies from combined agreement from one to nine non-experts with the expert
"""
sequence = list(range(settings.RESPONSE_COUNT))
combinations = []
for i in range(settings.RESPONSE_COUNT + 1):
combinations.append(list(itertools.combinations(sequence, i)))
print(combinations)
accuracies = [0]
standard_deviations = [0]
for i in range(1, settings.RESPONSE_COUNT + 1):
current_combinations = combinations[i]
combination_accuracies = []
for combination in current_combinations:
correct = 0
incorrect = 0
for sentence in agreement_dictionary.keys():
expert_annotations = agreement_dictionary[sentence][-1][1]
chosen_annotations = [agreement_dictionary[sentence][x][1] for x in combination]
votes = np.sum(chosen_annotations, axis=0)
chair = 0
if len(combination) > 0 and len(combination) % 2 == 0:
max_accuracy = 0
for judgement_index in combination:
turker = agreement_dictionary[sentence][judgement_index][0]
turker_accuracy = turker_accuracies[turker][0][1]
if turker_accuracy > max_accuracy:
max_accuracy = turker_accuracy
chair = judgement_index
result_votes = [0] * len(votes)
for j in range(len(votes)):
if votes[j] < len(chosen_annotations) / 2:
result_votes[j] = 0
elif votes[j] > len(chosen_annotations) / 2:
result_votes[j] = 1
else:
result_votes[j] = agreement_dictionary[sentence][chair][1][j]
for j in range(len(votes)):
if expert_annotations[j] == result_votes[j]:
correct += 1
else:
incorrect += 1
combination_accuracy = correct / (correct + incorrect)
combination_accuracies.append(combination_accuracy)
standard_deviation = np.std(combination_accuracies)
standard_deviations.append(standard_deviation)
accuracy = sum(combination_accuracies) / len(combination_accuracies)
accuracies.append(accuracy)
return accuracies, standard_deviations | 3253505366edffea1cc7c1302b082dbd85668ad2 | 11,863 |
def count_num_peps(filename):
"""
Count the number of peptide sequences in FASTA file.
"""
with open(filename) as f:
counter = 0
for line in f:
if line.startswith(">"):
counter += 1
return counter | c062a22cd925f29d8793ab364a74cf05cbae2a66 | 11,864 |
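# Usage sketch: write a two-sequence FASTA to a temporary file and count its '>' headers.
import os
import tempfile
demo_fd, demo_path = tempfile.mkstemp(suffix=".fasta")
with os.fdopen(demo_fd, "w") as handle:
    handle.write(">pep1\nMKVL\n>pep2\nACDE\n")
assert count_num_peps(demo_path) == 2
os.remove(demo_path)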
import re
def get_variables(examples):
"""Convert a code string to a list of variables.
We assume a variable is a 'word' with only alphanumeric characters in it."""
variables = [" ".join(re.split(r"\W+", text)) for text in examples["text"]]
return {"variables": variables} | 385a4fb3a73a432e6afa9aa69330f950246f48d0 | 11,865 |
def _stored_data_paths(wf, name, serializer):
"""Return list of paths created when storing data"""
metadata = wf.datafile(".{}.alfred-workflow".format(name))
datapath = wf.datafile(name + "." + serializer)
return [metadata, datapath] | 5f01d804db9f1848cc13e701a56e51c06dccdb31 | 11,866 |
def ascii_to_walls(char_matrix):
"""
    A parser to build a gridworld from an ASCII character matrix:
    '#' marks a wall cell and ' ' marks an empty cell. The grid must be square.
    :param char_matrix: Matrix of characters (list of equal-length strings).
    :return: (wall_locs, empty) -- lists of (row, col) tuples for wall cells
        and empty cells respectively.
"""
grid_size = len(char_matrix[0])
assert(len(char_matrix) == grid_size), 'Mismatch in the columns.'
for row in char_matrix:
assert(len(row) == grid_size), 'Mismatch in the rows.'
# ...
wall_locs = []
empty = []
for r in range(grid_size):
for c in range(grid_size):
char = char_matrix[r][c]
if char == '#':
wall_locs.append((r, c))
elif char == ' ':
empty.append((r, c))
else:
raise ValueError('Unknown character {} in grid.'.format(char))
# Attempt to make the desired gridworld.
return wall_locs, empty | 9f6520625623bd446923e374a1a5a557038dfd48 | 11,867 |
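# Usage sketch (illustrative 2x2 grid): three wall cells and one empty cell.
demo_walls, demo_empty = ascii_to_walls(["##", "# "])
assert demo_walls == [(0, 0), (0, 1), (1, 0)]
assert demo_empty == [(1, 1)]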
def mock_sd(nresp=1):
"""Fake Stackdriver Monitoring API response for the ListTimeSeries endpoint.
Args:
nresp (int): Number of responses to add to response.
Returns:
ChannelStub: Mocked gRPC channel stub.
"""
timeserie = load_fixture('time_series_proto.json')
response = {'next_page_token': '', 'time_series': [timeserie]}
return mock_grpc_stub(
response=response,
proto_method=metric_service_pb2.ListTimeSeriesResponse,
nresp=nresp) | cbc5659c02a73048f0263803562a130ac475bcb2 | 11,868 |
def cohesion_separation(chroms, doc):
"""Measure balancing both cohesion and separation of clusters."""
coh = cohesion(chroms, doc)
sep = separation(chroms, doc)
return (1 + sigmoid(coh)) ** sep | c883ee67e978e51b56f4be84e7e0731368eeb5f1 | 11,869 |
import re
def get_number_location(
input : str,
):
# endregion get_number_location header
# region get_number_location docs
"""
get the string indices of all numbers that occur on the string
format example: [ ( 0, 1 ), ( 4, 6 ), ( 9, 9 ) ]
both begin and end are inclusive, in contrast with the way the std_lib does it
which is begin(inclusive), end(exclusive)
"""
# endregion get_number_location docs
# region get_number_location implementation
locations = []
for match in re.finditer("\d+", input):
# match start is inclusive
position_start = match.start()
# match end is exclusive
position_end = match.end() - 1
locations.append((position_start, position_end))
...
return locations | de035f640dd33dc96b4072bdc925efc649285121 | 11,871 |
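# Usage sketch: both endpoints of each span are inclusive.
assert get_number_location("a12b345") == [(1, 2), (4, 6)]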
def update_object(obj, new_values):
"""update an object attributes from a supplied dictionary"""
# avoiding obj.__dict__.update(new_values) as it will set a new attribute if it doesn't exist
for k, v in new_values.items():
if hasattr(obj, k):
try:
setattr(obj, k, v)
except AttributeError: # in case of read only attribute
log(f"update_object(): can't update property: {k}, with value: {v}")
except Exception as e:
log(f'update_object(): error, {e}, property: {k}, value: {v}')
return obj | 5e916b16301c6e733b2d98b32c175bb202529503 | 11,872 |
def subpathNeedsRefresh(modTimes, ufoPath, *subPath):
"""
Determine if a file needs to be refreshed.
Returns True if the file's latest modification time is different
from its previous modification time.
"""
previous = modTimes.get(subPath[-1])
if previous is None:
return True
latest = subpathGetModTime(ufoPath, *subPath)
return latest != previous | 046c37ca801a74bb83bb45c1b1d0510e15cba6c4 | 11,873 |
def resxy_(x: float, y: float, /) -> Resolution:
"""Construct resolution from X,Y order."""
return Resolution(x=x, y=y) | 1cd2995142981de0932b8bc9452df71b18a46d8b | 11,874 |
def group_toggle_modules(request, group):
"""Enable or disable modules.
"""
if request.method != 'POST':
raise Http404
referer = request.META.get('HTTP_REFERER', None)
next = SITE_ROOT if referer is None else referer
username = request.user.username
group_wiki = request.POST.get('group_wiki', 'off')
if group_wiki == 'on':
enable_mod_for_group(group.id, MOD_GROUP_WIKI)
messages.success(request, _('Successfully enable "Wiki".'))
else:
disable_mod_for_group(group.id, MOD_GROUP_WIKI)
        if referer and referer.find('wiki') > 0:
next = reverse('group_info', args=[group.id])
messages.success(request, _('Successfully disable "Wiki".'))
return HttpResponseRedirect(next) | 4844d8203bd757802e38bff6ac20f45ade07f21d | 11,875 |
import tensorflow as tf
def bilinear_sampler(imgs, coords):
"""
Construct a new image by bilinear sampling from the input image.
Args:
imgs: source image to be sampled from [batch, height_s, width_s, channels]
coords: coordinates of source pixels to sample from [batch, height_t,
Returns:
A new sampled image [batch, height_t, width_t, channels]
"""
def _repeat(x, n_repeats):
rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
rep = tf.cast(rep, 'float32')
x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
return tf.reshape(x, [-1])
coords_x, coords_y = tf.split(coords, [1, 1], axis=3)
inp_size = imgs.get_shape()
coord_size = coords.get_shape()
out_size = coords.get_shape().as_list()
out_size[3] = imgs.get_shape().as_list()[3]
coords_x = tf.cast(coords_x, 'float32')
coords_y = tf.cast(coords_y, 'float32')
y_max = tf.cast(tf.shape(imgs)[1] - 1, 'float32')
x_max = tf.cast(tf.shape(imgs)[2] - 1, 'float32')
zero = tf.zeros([1], dtype='float32')
eps = tf.constant([0.5], tf.float32)
coords_x = tf.clip_by_value(coords_x, eps, x_max - eps)
coords_y = tf.clip_by_value(coords_y, eps, y_max - eps)
x0 = tf.floor(coords_x)
x1 = x0 + 1
y0 = tf.floor(coords_y)
y1 = y0 + 1
x0_safe = tf.clip_by_value(x0, zero, x_max)
y0_safe = tf.clip_by_value(y0, zero, y_max)
x1_safe = tf.clip_by_value(x1, zero, x_max)
y1_safe = tf.clip_by_value(y1, zero, y_max)
wt_x0 = x1_safe - coords_x
wt_x1 = coords_x - x0_safe
wt_y0 = y1_safe - coords_y
wt_y1 = coords_y - y0_safe
# indices in the flat image to sample from
dim2 = tf.cast(inp_size[2], 'float32')
dim1 = tf.cast(inp_size[2] * inp_size[1], 'float32')
base = tf.reshape(_repeat(tf.cast(tf.range(coord_size[0]), 'float32') * dim1,
coord_size[1] * coord_size[2]),
[out_size[0], out_size[1], out_size[2], 1])
base_y0 = base + y0_safe * dim2
base_y1 = base + y1_safe * dim2
idx00 = tf.reshape(x0_safe + base_y0, [-1])
idx01 = x0_safe + base_y1
idx10 = x1_safe + base_y0
idx11 = x1_safe + base_y1
## sample from imgs
imgs_flat = tf.reshape(imgs, tf.stack([-1, inp_size[3]]))
imgs_flat = tf.cast(imgs_flat, 'float32')
im00 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx00, 'int32')), out_size)
im01 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx01, 'int32')), out_size)
im10 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx10, 'int32')), out_size)
im11 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx11, 'int32')), out_size)
w00 = wt_x0 * wt_y0
w01 = wt_x0 * wt_y1
w10 = wt_x1 * wt_y0
w11 = wt_x1 * wt_y1
output = tf.add_n([
w00 * im00, w01 * im01,
w10 * im10, w11 * im11
])
return output | 4138a515f0f4f25b569aae1c28d18de897c63a24 | 11,876 |
import types
def wrap_array(typingctx, data_ptr, shape_tup):
"""create an array from data_ptr with shape_tup as shape
"""
assert isinstance(data_ptr, types.CPointer), "invalid data pointer"
assert (isinstance(shape_tup, types.UniTuple)
and shape_tup.dtype == np.intp), "invalid shape tuple"
dtype = data_ptr.dtype
arr_typ = types.Array(dtype, shape_tup.count, 'C')
def codegen(context, builder, sig, args):
assert(len(args) == 2)
data = args[0]
shape = args[1]
# XXX: unnecessary allocation and copy, reuse data pointer
shape_list = cgutils.unpack_tuple(builder, shape, shape.type.count)
ary = _empty_nd_impl(context, builder, arr_typ, shape_list)
cgutils.raw_memcpy(builder, ary.data, data, ary.nitems, ary.itemsize, align=1)
# clean up image buffer
fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()])
fn_release = builder.module.get_or_insert_function(fnty, name="cv_delete_buf")
builder.call(fn_release, [data])
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
# # cgutils.printf(builder, "%d", shape)
# retary = context.make_array(arr_typ)(context, builder)
# itemsize = context.get_abi_sizeof(context.get_data_type(dtype))
# shape_list = cgutils.unpack_tuple(builder, shape, shape.type.count)
# strides = [context.get_constant(types.intp, itemsize)]
# for dimension_size in reversed(shape_list[1:]):
# strides.append(builder.mul(strides[-1], dimension_size))
# strides = tuple(reversed(strides))
# #import pdb; pdb.set_trace()
# context.populate_array(retary,
# data=data,
# shape=shape,
# strides=strides,
# itemsize=itemsize,
# meminfo=None)
# return retary._getvalue()
return signature(arr_typ, data_ptr, shape_tup), codegen | 03fc3c995ae459e644d88baab2ca766ff528ba8d | 11,877 |
from collections import defaultdict
from typing import Dict, Tuple
import torch
from geometric.internal import Angle, Dihedral, Distance, OutOfPlane
from geometric.internal import PrimitiveInternalCoordinates as GeometricPRIC
from geometric.internal import (
RotationA,
RotationB,
RotationC,
TranslationX,
TranslationY,
TranslationZ,
)
from geometric.molecule import Molecule as GeometricMolecule
def geometric_project_derivatives(
molecule: Molecule,
conformer: torch.Tensor,
internal_coordinates_indices: Dict[str, torch.Tensor],
reference_gradients: torch.Tensor,
reference_hessians: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""A helper method to project a set of gradients and hessians into internal
coordinates using ``geomTRIC``.
Args:
molecule: The molecule of interest
conformer: The conformer of the molecule with units of [A] and shape=(n_atoms, 3)
internal_coordinates_indices: The indices of the atoms involved in each type
of internal coordinate.
reference_gradients: The gradients to project.
reference_hessians: The hessians to project.
Returns:
The projected gradients and hessians.
"""
geometric_molecule = GeometricMolecule()
geometric_molecule.Data = {
"resname": ["UNK"] * molecule.n_atoms,
"resid": [0] * molecule.n_atoms,
"elem": [atom.element.symbol for atom in molecule.atoms],
"bonds": [(bond.atom1_index, bond.atom2_index) for bond in molecule.bonds],
"name": molecule.name,
"xyzs": [conformer.detach().numpy()],
}
geometric_coordinates = GeometricPRIC(geometric_molecule)
geometric_coordinates.Internals = [
internal
for internal in geometric_coordinates.Internals
if not isinstance(
internal,
(TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC),
)
]
# We need to re-order the internal coordinates to generate those produced by
# smirnoffee.
ic_by_type = defaultdict(list)
ic_type_to_name = {
Distance: "distances",
Angle: "angles",
Dihedral: "dihedrals",
OutOfPlane: "out-of-plane-angles",
}
for internal_coordinate in geometric_coordinates.Internals:
ic_by_type[ic_type_to_name[internal_coordinate.__class__]].append(
internal_coordinate
)
ordered_internals = []
for ic_type in internal_coordinates_indices:
ic_by_index = {
_geometric_internal_coordinate_to_indices(ic): ic
for ic in ic_by_type[ic_type]
}
for ic_indices in internal_coordinates_indices[ic_type]:
ic_indices = tuple(int(i) for i in ic_indices)
if ic_indices[-1] > ic_indices[0]:
ic_indices = tuple(reversed(ic_indices))
ordered_internals.append(ic_by_index[ic_indices])
geometric_coordinates.Internals = ordered_internals
reference_gradients = reference_gradients.numpy().flatten()
reference_hessians = reference_hessians.numpy().reshape(molecule.n_atoms * 3, -1)
xyz = conformer.detach().numpy()
return (
geometric_coordinates.calcGrad(xyz, reference_gradients),
geometric_coordinates.calcHess(xyz, reference_gradients, reference_hessians),
) | f04988255698e43e0febebbf0fa6b4d67625f86f | 11,880 |
def api_get_script(request):
"""POST - Frida Get Script."""
if not request.POST.getlist('scripts[]'):
return make_api_response(
{'error': 'Missing Parameters'}, 422)
resp = tests_frida.get_script(request, True)
if resp['status'] == 'ok':
return make_api_response(resp, 200)
return make_api_response(resp, 500) | f221543d648901c38620bd84d8c6d55a3c8545e0 | 11,881 |
import re
def is_valid_slug(slug):
"""Returns true iff slug is valid."""
VALID_SLUG_RE = re.compile(r"^[a-z0-9\-]+$")
return VALID_SLUG_RE.match(slug) | 439349f0689cd53fb2f7e89b2b48b90aa79dae80 | 11,882 |
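# Usage sketch: lowercase letters, digits and hyphens match; anything else returns None.
assert is_valid_slug("hello-world-42")
assert not is_valid_slug("Hello World")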
import time
import torch
from pykeops.torch import LazyTensor
def KMeans_GPU(x, K=10, Niter=10, verbose=True):
"""Implements Lloyd's algorithm for the Euclidean metric."""
start = time.time()
N, D = x.shape # Number of samples, dimension of the ambient space
c = x[:K, :].clone() # Simplistic initialization for the centroids
x_i = LazyTensor(x.view(N, 1, D)) # (N, 1, D) samples
c_j = LazyTensor(c.view(1, K, D)) # (1, K, D) centroids
# K-means loop:
# - x is the (N, D) point cloud,
# - cl is the (N,) vector of class labels
# - c is the (K, D) cloud of cluster centroids
for i in range(Niter):
# E step: assign points to the closest cluster -------------------------
D_ij = ((x_i - c_j) ** 2).sum(-1) # (N, K) symbolic squared distances
cl = D_ij.argmin(dim=1).long().view(-1) # Points -> Nearest cluster
# M step: update the centroids to the normalized cluster average: ------
# Compute the sum of points per cluster:
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
# Divide by the number of points per cluster:
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl # in-place division to compute the average
if verbose: # Fancy display -----------------------------------------------
if torch.cuda.is_available():
torch.cuda.synchronize()
end = time.time()
print(
f"K-means for the Euclidean metric with {N:,} points in dimension {D:,}, K = {K:,}:"
)
print(
"Timing for {} iterations: {:.5f}s = {} x {:.5f}s\n".format(
Niter, end - start, Niter, (end - start) / Niter
)
)
return cl, c | 675632335520477cdcd283b5e12b46912c26b323 | 11,883 |