content | sha1 | id
---|---|---
stringlengths 35 to 762k | stringlengths 40 to 40 | int64 0 to 3.66M
def notebook(request, id=0):
"""
:param request:
:param id:
:return:
"""
get_notebook = JupyterNotebooks.objects.get(id=id)
return render(request, "customdashboard/notebook.html",
{'get_notebook': get_notebook}) | 3d1e3880182f6c8d507391fc66a9c0b41f18e3bc | 8,200 |
def encrypt_decrypt(data_string, password, mode='encrypt'):
"""Encrypts OR Decrypts data_string w.r.t password based on mode specified
Parameters:
data_string: Text that needs to be encoded. passed in string format
password: a string to encrypt data before encoding into an image.
mode:
'encrypt' --> encrypts the data
'decrypt' --> decrypts the data
Returns:
Data string either encrypted or decrypted based on mode specified
"""
_hash = md5(password.encode())
hash_value = _hash.hexdigest()
key = urlsafe_b64encode(hash_value.encode())
cipher = Fernet(key) # 32-byte key - URLsafe - base64-encoded
if mode=='encrypt':
data_bytes = data_string.encode()
encrypted_bytes = cipher.encrypt(data_bytes)
encrypted_data_string = encrypted_bytes.decode()
return encrypted_data_string
elif mode=='decrypt':
encrypted_bytes = data_string.encode()
decrypted_bytes = cipher.decrypt(encrypted_bytes)
decrypted_data_string = decrypted_bytes.decode()
return decrypted_data_string
else:
raise InvalidModeError("Expected 'encrypt' OR 'decrypt' ") | c0ecdf2009fe1b40cb9ed86e12904e241eb5ea86 | 8,201 |
def compute_alphabet(sequences):
"""
Returns the alphabet used in a set of sequences.
"""
alphabet = set()
for s in sequences:
alphabet = alphabet.union(set(s))
return alphabet | cf8f7dc1e31a28fe0910d806d18189aae7d7a85b | 8,202 |
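For illustration (the input values are made up), `compute_alphabet` simply unions the characters of every sequence:

```python
assert compute_alphabet(["ACGT", "AACC"]) == {"A", "C", "G", "T"}
```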
import argparse
import numpy as np
def numpy_dtype_arg_type(string: str) -> np.dtype:
    """argument type for string reps of numpy dtypes"""
    try:
        ret = np.dtype(string)
    except TypeError as error:
        raise argparse.ArgumentTypeError(str(error)) from error
return ret | e1d9fa61c6c6d954007e63e61c49a8564c8f6c5d | 8,203 |
def Diff(a, b):
"""Returns the number of different elements between 2 interables.
Args:
a(iterable): first iterable.
b(iterable): second iterable.
Returns:
int: the number of different elements.
"""
return sum(map(lambda x, y: bool(x-y), a, b)) | 0885bd224f956f138e80a4b681ebc581c733cc51 | 8,204 |
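A quick illustrative check of `Diff` on two equal-length numeric sequences:

```python
# Positions 1 and 3 differ, so the count is 2 (made-up input).
assert Diff([1, 2, 3, 4], [1, 0, 3, 0]) == 2
```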
def load_description(model):
"""Load description of the <model>."""
desc = get_available_pkgyaml(model)
entry = read_mlhubyaml(desc)
return entry | 2a97ee446d0693af704c6b0ebd14376d3e6dea37 | 8,205 |
import math
import numpy as np
import vpype as vp
from shapely.geometry import MultiLineString
def generate_star(rect: RectType, line_count: int = 20) -> vp.LineCollection:
"""Generate a set of line from a random point."""
orig_x = np.random.uniform(rect[0], rect[0] + rect[2])
orig_y = np.random.uniform(rect[1], rect[1] + rect[3])
r = math.hypot(rect[2], rect[3])
angles = np.linspace(0, 2 * math.pi, num=line_count, endpoint=False)
phase = np.random.normal(0, math.pi / 4)
mls = MultiLineString(
[
([orig_x, orig_y], [orig_x + r * math.cos(a), orig_y + r * math.sin(a)])
for a in angles + phase
]
)
return vp.LineCollection(mls.intersection(rect_to_polygon(rect))) | a430b486af8606d949a057b4578fdddd9968386b | 8,206 |
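`generate_star` relies on a `rect_to_polygon` helper that is not included in this entry; a plausible sketch (an assumption about the helper, not its original implementation) converts the `(x, y, width, height)` rectangle into a Shapely polygon:

```python
from shapely.geometry import Polygon

def rect_to_polygon(rect):
    # Assumed helper: rect is (x, y, width, height).
    x, y, w, h = rect
    return Polygon([(x, y), (x + w, y), (x + w, y + h), (x, y + h)])
```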
def failure(request):
"""Display failure message"""
return HttpResponse(f"Failure! {request.session['failure message'] if request.session['failure message'] is not None else ''}") | c9eee874106fce87d6816e3216e3a86d6eef5fab | 8,207 |
import torch
def visualize_image(cam, rgb_img, target_category):
"""
Visualize output for given image
"""
input_tensor = preprocess_image(rgb_img)
grayscale_cam = cam(input_tensor=input_tensor, target_category=target_category)
grayscale_cam = grayscale_cam[0, :]
output = cam.activations_and_grads(input_tensor)
softmax = torch.nn.Softmax(dim = 1)
print("PRED: ", softmax(output).tolist())
visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
return visualization | 2644eca6cd50078a167b7d791a625ecb13fef17d | 8,208 |
import logging
import numpy as np
def read_csr_matrix(f):
"""Read A in compressed sparse row format from file f. Return dense ndarray.
Text file format:
- one number per line
- First number is m = number of rows
- Second number is n = number of columns
- Next m+1 numbers are the row pointers (int, zero-based)
- Next nnz numbers are the column indices (int, zero-based)
- Next nnz numbers are the matrix values (float)
"""
logging.debug("attempting to read matrix in CSR format...")
    # strip comment header
    row = next(f).strip()
    while row[0] == '#':
        row = next(f).strip()
    # read shape
    m = int(row)
    row = next(f).strip()
n = int(row)
logging.debug("attempting to read (%s x %s) matrix in CSR format...", m, n)
# read m+1 row pointers
counter = 0
rowPointers = np.empty(m + 1, dtype=int) # we store nnz in the last place
for i in range(m + 1):
        rowPointers[i] = int(next(f).strip())
nnz = rowPointers[m]
    # read nnz column indices
    colIndices = np.empty(nnz, dtype=int)
    for i in range(nnz):
        colIndices[i] = int(next(f).strip())
if colIndices[i] >= n:
errMsg = "Inconsistent dims, col idx %d > %d" % (colIndices[i], n)
logging.error(errMsg)
raise RuntimeError(errMsg)
# read nnz values
values = np.empty(nnz)
for i in range(nnz):
        values[i] = float(next(f).strip())
# populate matrix
res = np.zeros((m, n))
for i in range(m):
for nzi in range(rowPointers[i], rowPointers[i + 1]):
res[i, colIndices[nzi]] = values[nzi]
logging.info(
"successfully read (%s x %s) matrix in CSR format with nnz %s.",
m, n, nnz)
return res | c9f1201217ca7c6ca45d0b3d258fc1828c5ab36c | 8,209 |
import random
def _get_random_hangul(count=(0xd7a4 - 0xac00)):
"""Generate a sequence of random, unique, valid Hangul characters.
Returns all possible modern Hangul characters by default.
"""
valid_hangul = [chr(_) for _ in range(0xac00, 0xd7a4)]
return random.sample(valid_hangul, count) | 3a41edd36cd2aac05e51a121743bcfb61455bd9b | 8,210 |
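An illustrative call to `_get_random_hangul` requesting five characters (the count is arbitrary):

```python
sample = _get_random_hangul(5)
assert len(set(sample)) == 5
assert all(0xAC00 <= ord(ch) < 0xD7A4 for ch in sample)
```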
def build(dir, **kwargs):
"""run cmake to generate a project buildsystem
Parameters:
----------
dir str: Location of the CMake build directory
Keyword Args:
----------
parallel int: The maximum number of concurrent processes to use when building. Default: 1 less than
the number of available logical cores.
        target str: Name of the build target to build (passed to cmake --build --target).
config str: For multi-configuration tools, choose specified configuration
flags seq(str): Sequence of flags (or any other unlisted argument). Include preceding dash(es).
tooloptions seq(str): Sequence of options to be passed onto the build tool
env: A mapping that defines the environment variables for the new process
"""
# prune empty entries
kwargs = {key: value for key, value in kwargs.items() if value}
# add defaults if not specified
if not "parallel" in kwargs:
kwargs["parallel"] = _getWorkerCount()
# build cmake arguments
args = [findexe("cmake"), "--build", dir]
env = None
for key, value in kwargs.items():
if key in ("parallel", "target", "config"):
args.append(f"--{key}")
args.append(f"{value}")
elif key == "flags":
for f in value:
args.append(f)
elif key == "env":
env = value
elif key is not "tooloptions":
raise KeyError
if "tooloptions" in kwargs:
args.append("--")
for f in value:
args.append(f)
return run(args, env=env).check_returncode() | 2f21fc901d7c95d3b1a2b37d6ba3584d6cb96efb | 8,211 |
def is_builtin_model(target: type) -> bool:
"""returns ``True`` if the given type is a model subclass"""
return is_builtin_class_except(target, ["MetaModel", "Model", "DataBag"]) | 6b1cf3b0fdd0db50c0dde6a2ca3f3bcb8e8328cf | 8,212 |
def runQ(qparsed, params=dict(), nbcircuits=1, nbqubits = None):
"""
qparsed: qlang circuit (already parsed)
params:{x:np.array, t:np.array}
"""
#*** verify if parameters are ok for qparsed circuit ****
_ , vector_parameters = parseqlang.parameters_from_gates(qparsed)
for pname, dimension in vector_parameters.items():
if pname not in params:
raise Exception(f'Vector parameter "{pname}" not provided')
        if params[pname].shape[0] != dimension:
            raise Exception(f"Vector parameter {pname} is of dimension {dimension} "
                            f"but {params[pname].shape[0]} are provided")
        if len(params[pname].shape) == 1:
            nb_rows = 1
        else:
            nb_rows = params[pname].shape[1]
        if nbcircuits == 1 and nb_rows > 1:
            nbcircuits = nb_rows
        elif nbcircuits != nb_rows and nb_rows != 1:
            raise Exception(f"{pname}: got {nb_rows} rows ({nbcircuits} expected)")
    #*** determine nb qubits ****
    qbits = parseqlang.nbqubits_from_gates(qparsed)
    if nbqubits is None:
        nbqubits = qbits
    elif nbqubits < qbits:
        raise Exception(f"{nbqubits} qubits asked, but {qbits} qubits are needed")
#*** run circuit(s) with manyq ****
initQreg(nbqubits,n=nbcircuits)
for gate in qparsed:
gname = gate[0]
gparam = gate[1]
qbit0 = gparam[0]
# print(gate)
# print(f"qbit0: {qbit0}")
if gname in {"CZ","CX"}:
qbit1 = gparam[1]
# print(f"Running {gname}({qbit0},{qbit1})")
globals()[gname](qbit0,qbit1)
continue
pname = gparam[1][0]
pindex = gparam[1][1]
# print(f"Running {gname}({qbit0},{pname}_{pindex})")
globals()[gname](qbit0,params.get(pname)[pindex])
return Qreg.outQ | d6ca4afd14e56961920033e9e7ab40f4fc4a9ae6 | 8,213 |
def add_CNNB_loss_v2(true_labels,
hidden,
embed_model,
bsz=512,
dataset='imagenet2012',
hidden_norm=True,
temperature=1.0,
strategy=None,
loss_type='ce',
clip_min=0,
method='onehot'):
"""Compute loss for model.
Args:
true_labels: vector of labels.
hidden: hidden vector (`Tensor`) of shape (bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
strategy: context information for tpu.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if strategy is not None:
hidden1_large = tpu_cross_replica_concat(hidden1, strategy)
hidden2_large = tpu_cross_replica_concat(hidden2, strategy)
enlarged_batch_size = tf.shape(hidden1_large)[0]
# TODO(iamtingchen): more elegant way to convert u32 to s32 for replica_id.
replica_context = tf.distribute.get_replica_context()
reps = strategy.num_replicas_in_sync
sims=get_batch_sims(true_labels, embed_model, bsz//reps, dataset, method)
sims=tf.cast(sims > clip_min, sims.dtype) * sims
#sims.set_shape([512//reps, 512//reps])
replica_id = tf.cast(
tf.cast(replica_context.replica_id_in_sync_group, tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels1=tf.concat([sims if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels2=tf.concat([sims-tf.linalg.diag(tf.linalg.diag_part(sims)) if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels=tf.concat([labels1,labels2],1)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
else:
#sims.set_shape([batch_size, batch_size])
sims=get_batch_sims(true_labels, embed_model, bsz, dataset, method)
sims=tf.cast(sims > clip_min, sims.dtype) * sims
hidden1_large = hidden1
hidden2_large = hidden2
labels=tf.concat([sims,sims-tf.linalg.diag(tf.linalg.diag_part(sims))],1)
masks = tf.one_hot(tf.range(batch_size), batch_size)
slabels=tf.split(labels, 2, axis=1)
#Calculate similarity between hidden representations from aug1 and from aug1
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
# tf.print(true_labels)
# tf.print(logits_aa)
#Calculate similarity between hidden representations from aug2 and from aug2
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
if loss_type not in ['fro']:
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_aa = logits_aa - masks * LARGE_NUM
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_bb = logits_bb - masks * LARGE_NUM
else:
logits_aa = logits_aa - masks * logits_aa
logits_bb = logits_bb - masks * logits_bb
#Calculate similarity between hidden representations from aug1 and from aug2
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
#Calculate similarity between hidden representations from aug2 and from aug1
#-> identical to above case if using single GPU
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
#Calculate loss for aug1 samples by taking softmax over logits and then applying cross_entropy
# tf.print(slabels[0].shape)
# tf.print(slabels[1].shape)
# tf.print(logits_ab.shape)
# tf.print(logits_aa.shape)
if loss_type=='ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
loss_a = tf.reduce_mean(loss_fn(slabels[0],logits_ab)+loss_fn(slabels[1]-masks*slabels[1],logits_aa))
loss_b = tf.reduce_mean(loss_fn(slabels[0],logits_ba)+loss_fn(slabels[1]-masks*slabels[1],logits_bb))
elif loss_type=='softmax-ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
slabels[0]=tf.nn.softmax(slabels[0]/temperature)
slabels[1]=tf.nn.softmax((slabels[1]/temperature)-masks*LARGE_NUM)
loss_a = tf.reduce_mean(loss_fn(slabels[0],logits_ab)+loss_fn(slabels[1],logits_aa))
loss_b = tf.reduce_mean(loss_fn(slabels[0],logits_ba)+loss_fn(slabels[1],logits_bb))
elif loss_type=='kl': # Consider softmaxing labels here
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
loss_a = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ab))+loss_fn(slabels[1]-masks*slabels[1],tf.nn.softmax(logits_aa)))
loss_b = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ba))+loss_fn(slabels[1]-masks*slabels[1],tf.nn.softmax(logits_bb)))
elif loss_type=='klsoft':
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
slabels[0]=tf.nn.softmax(slabels[0]/temperature)
slabels[1]=tf.nn.softmax((slabels[1]/temperature)-masks*LARGE_NUM)
loss_a = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ab))+loss_fn(slabels[1],tf.nn.softmax(logits_aa)))
loss_b = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ba))+loss_fn(slabels[1],tf.nn.softmax(logits_bb)))
elif loss_type=='fro': #Consider softmaxing labels here
loss_fn=tf.norm
loss_a = tf.reduce_mean(loss_fn(slabels[0]-logits_ab, ord='fro', axis=(0,1))+loss_fn(slabels[1]-logits_aa, ord='fro', axis=(0,1)))
loss_b = tf.reduce_mean(loss_fn(slabels[0]-logits_ba, ord='fro', axis=(0,1))+loss_fn(slabels[1]-logits_bb, ord='fro', axis=(0,1)))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, logits_ab, labels | 117af71c6f84ad1d2553cd3a18001b93af8539c6 | 8,214 |
def simulation_wells(self):
"""Get a list of all simulation wells for a case
Returns:
:class:`rips.generated.generated_classes.SimulationWell`
"""
wells = self.descendants(SimulationWell)
return wells | ecf13fc524f12be21593c49d8d22c365564716e9 | 8,215 |
from typing import Optional
def getSmartMeter() -> Optional[str]:
"""Return smartmeter name used in recording."""
mapping = getDeviceMapping()
# Identifier for smartmeter is meter with phase 0
try: return next(key for key in mapping if mapping[key]["phase"] == 0)
except StopIteration: return None | 8e947d5d9078886f9bc2b662162304eb2fb6474b | 8,216 |
def olbp(array, point):
"""Perform simple local binary pattern calculation with a fixed
3x3 neighbourhood. Thanks to:
http://www.bytefish.de/blog/local_binary_patterns/
for a really nice explanation of LBP.
Won't return correct results around the image boundaries.
Because it's only a 3x3 neighbourhood, probably very susceptible
to noise.
TODO: Bigger neighbourhood. Variable size maybe?
Returns: A single decimal number (the encoded pattern)
"""
x, y = point
# Make sure we're within the array bounds.
if x < 1:
x = 1
if x > (array.shape[0] - 2):
x = array.shape[0] - 2
if y < 1:
y = 1
if y > (array.shape[1] - 2):
y = array.shape[1] - 2
center = array[x, y]
code = 0
code |= (array[x - 1, y - 1] > center) << 7
code |= (array[x - 1, y] > center) << 6
code |= (array[x - 1, y + 1] > center) << 5
code |= (array[x, y - 1] > center) << 4
code |= (array[x, y + 1] > center) << 3
code |= (array[x + 1, y - 1] > center) << 2
code |= (array[x + 1, y] > center) << 1
code |= (array[x + 1, y + 1] > center) << 0
return code | 0db7058515ef435fe744a3b304e6532ec56b5f63 | 8,217 |
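A tiny made-up example of `olbp` on a 3x3 patch: only the two neighbours holding 9 exceed the centre value 5, so bits 3 and 0 are set and the encoded pattern is 0b00001001 = 9.

```python
import numpy as np

patch = np.array([[1, 1, 1],
                  [1, 5, 9],
                  [1, 1, 9]])
assert olbp(patch, (1, 1)) == 9
```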
from django import http
from typing import Optional
def view_index(
request: http.HttpRequest,
workflow: Optional[models.Workflow] = None,
) -> http.HttpResponse:
"""Render the list of views attached to a workflow.
:param request: Http request received.
:param workflow: Workflow being processed
:return: HTTP response with the table
"""
# Get the views
views = workflow.views.values(
'id',
'name',
'description_text',
'modified')
# Build the table only if there is anything to show (prevent empty table)
return render(
request,
'table/view_index.html',
{
'query_builder_ops': workflow.get_query_builder_ops_as_str(),
'table': services.ViewTable(views, orderable=False),
},
) | 2dca3e42a3d2b855d795fc0c61b41c2a2f449724 | 8,218 |
import numpy as np
def inverseLPSB(data, mu, g):
"""Compute regularized L-PSB step."""
mUpd = data.mUpd
gamma = data.gamma
gammah = data.gamma + mu
Q22 = np.tril(data.STY[:mUpd, :mUpd], -1) + np.tril(data.STY[:mUpd, :mUpd], -1).T + \
np.diag(np.diag(data.STY[:mUpd, :mUpd])) + \
gamma * np.diag(np.diag(data.STS[:mUpd, :mUpd]))
Q = np.block([
[np.zeros((mUpd, mUpd)), np.triu(data.STS[:mUpd, :mUpd])],
[np.triu(data.STS[:mUpd, :mUpd]).T, Q22]
])
Q += 1/gammah * np.block([
[data.STS[:mUpd, :mUpd], data.STY[:mUpd, :mUpd]],
[data.STY[:mUpd, :mUpd].T, data.YTY[:mUpd, :mUpd]]
])
ATg = np.block([data.S[:, :mUpd].T @ g, data.Y[:, :mUpd].T @ g])
p = np.linalg.solve(Q, ATg)
#p = scipy.linalg.solve(Q, ATg, assume_a='sym')
Ap = data.S[:, :mUpd] @ p[:mUpd] + data.Y[:, :mUpd] @ p[mUpd:]
d = 1/gammah**2 * Ap - 1/gammah * g
return d | 500705d0fd5cb5ccae7fa8dfeaa1548a65fa2203 | 8,219 |
import json
def canPlay(request):
"""
    Endpoint that returns the list of cards that can be played (for the Player)
rq : {
"cards_played" : [
{
"card_name": "As",
"value_non_atout": 0,
"value_atout": 0,
"id" : "A"
},
{
"card_name": "7s",
"value_non_atout": 0,
"value_atout": 0,
"id" : "7"
},
{
"card_name": "8s",
"value_non_atout": 0,
"value_atout": 0,
"id" : "8"
}
],
"atout" : "c",
"opening_color" : "s",
"remaining_cards": [
{
"card_name": "7d",
"value_non_atout": 0,
"value_atout": 0,
"id":"7"
},
{
"card_name": "Kh",
"value_non_atout": 4,
"value_atout": 4,
"id":"K"
},
{
"card_name": "Ks",
"value_non_atout": 4,
"value_atout": 4,
"id":"K"
},
{
"card_name": "Ac",
"value_non_atout": 11,
"value_atout": 11,
"id":"A"
},
{
"card_name": "9c",
"value_non_atout": 0,
"value_atout": 14,
"id":"9"
}
]
}
"""
body = json.loads(request.body)
cards_played = body['cards_played']
remaining_cards = body['remaining_cards']
opening_color = body['opening_color']
atout = body['atout']
can_play = []
cards_atout = []
order_no_a = ['7','8','9','J','Q','K','10','A']
order_a = ['7','8','Q','K','10','A','9','J']
if cards_played:
if opening_color == atout:
for x in remaining_cards:
if opening_color in x['card_name']:
cards_atout.append(x)
if not cards_atout:
can_play=remaining_cards
else:
max=0
if len(cards_played)==1:
max=order_a.index(cards_played[0]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
elif len(cards_played)==2:
max = order_a.index(cards_played[0]['idc'])
if atout in cards_played[1]['card_name']:
if order_a.index(cards_played[1]['idc']) > max :
max = order_a.index(cards_played[1]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
max = order_a.index(cards_played[0]['idc'])
if atout in cards_played[1]['card_name']:
if order_a.index(cards_played[1]['idc']) > max :
max = order_a.index(cards_played[1]['idc'])
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for x in remaining_cards:
if opening_color in x['card_name']:
can_play.append(x)
if not can_play:
i=0
for x in remaining_cards:
if atout in x['card_name']:
i+=1
cards_atout.append(x)
if i==0:
can_play=remaining_cards
else:
            # The player holds a trump card; check which card is currently winning the trick
if len(cards_played)==3:
max=0
if atout in cards_played[1]['card_name']:
max = order_a.index(cards_played[1]['idc'])
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
can_play=remaining_cards
else:
if atout in cards_played[2]['card_name']:
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if order_no_a.index(cards_played[2]['idc'])<order_no_a.index(cards_played[1]['idc']) and order_no_a.index(cards_played[1]['idc']) >order_no_a.index(cards_played[0]['idc']):
can_play=remaining_cards
else:
can_play=cards_atout
elif len(cards_played)==1:
can_play=cards_atout
else:
max=0
if atout in cards_played[1]['card_name']:
max = order_a.index(cards_played[1]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if order_no_a.index(cards_played[1]['idc'])<order_no_a.index(cards_played[0]['idc']):
can_play=remaining_cards
else:
can_play=cards_atout
else:
can_play=remaining_cards
return Response(can_play) | b579e0e99eebed68fa55f8eeb287cb2cdf283de6 | 8,220 |
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Genie Aladdin component."""
return True | 22f6c6126d8d7b3ce7df124b144b9ccfb4fc30c2 | 8,221 |
import importlib
def _module_available(module_path: str) -> bool:
"""Testing if given module is avalaible in your env
>>> _module_available('os')
True
>>> _module_available('bla.bla')
False
"""
mods = module_path.split('.')
assert mods, 'nothing given to test'
    # it has to be tested part by part
for i in range(len(mods)):
module_path = '.'.join(mods[:i + 1])
if importlib.util.find_spec(module_path) is None:
return False
return True | 6673bf25845af6c12494ffbec0dc8bb8ab950ff2 | 8,222 |
def _GetPathBeforeFinalDir(uri):
"""
Returns the part of the path before the final directory component for the
given URI, handling cases for file system directories, bucket, and bucket
subdirectories. Example: for gs://bucket/dir/ we'll return 'gs://bucket',
and for file://dir we'll return file://
Args:
uri: StorageUri.
Returns:
String name of above-described path, sans final path separator.
"""
sep = uri.delim
# If the source uri argument had a wildcard and wasn't expanded by the
# shell, then uri.names_file() will always be true, so we check for
# this case explicitly.
assert ((not uri.names_file()) or ContainsWildcard(uri.object_name))
if uri.names_directory():
past_scheme = uri.uri[len('file://'):]
if past_scheme.find(sep) == -1:
return 'file://'
else:
return 'file://%s' % past_scheme.rstrip(sep).rpartition(sep)[0]
if uri.names_bucket():
return '%s://' % uri.scheme
# Else it names a bucket subdir.
return uri.uri.rstrip(sep).rpartition(sep)[0] | 20cad56a858e8feccfd7154ecff33d9508a7ec80 | 8,223 |
import toml
def load_page_details(data, filename=None):
"""
# Raises
ValueError of (filename, error)
"""
try:
options = toml.loads(data)
except toml.TomlDecodeError as exc:
raise ValueError(filename, exc)
if not isinstance(options, dict):
raise ValueError(filename, 'page details could not be parsed into a JSON object')
return options | 117bb7d84625475745a30522fda7dccf1bc5a487 | 8,224 |
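A short illustrative call to `load_page_details` with an inline TOML string (the option names are made up):

```python
options = load_page_details('title = "Home"\ntemplate = "index.html"\n', filename="home.toml")
assert options == {"title": "Home", "template": "index.html"}
```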
def nested_render(cfg, fully_rendered_cfgs, replacements):
"""
    Template-render the provided cfg by recursively replacing {{var}}'s with values
from the current "namespace".
The nested config is treated like nested namespaces where the inner variables
are only available in current block and further nested blocks.
Said the opposite way: the namespace with available vars that can be used
includes the current block's vars and parent block vars.
This means that you can do replacements for top-level
(global namespaced) config vars anywhere, but you can only use inner configs within
that block or further nested blocks.
An example is worth a thousand words:
---------------------------------------------------------------------------------
fence-config.yaml
--------------------------------------------------------------------------------
BASE_URL: 'http://localhost/user'
OPENID_CONNECT:
fence:
api_base_url: 'http://other_fence/user'
client_kwargs:
redirect_uri: '{{BASE_URL}}/login/fence/login'
authorize_url: '{{api_base_url}}/oauth2/authorize'
THIS_WONT_WORK: '{{api_base_url}}/test'
--------------------------------------------------------------------------------
"redirect_uri" will become "http://localhost/user/login/fence/login"
- BASE_URL is in the global namespace so it can be used in this nested cfg
"authorize_url" will become "http://other_fence/user/oauth2/authorize"
- api_base_url is in the current namespace, so it is available
"THIS_WONT_WORK" will become "/test"
- Why? api_base_url is not in the current namespace and so we cannot use that
as a replacement. the configuration (instead of failing) will replace with
an empty string
Args:
        cfg (dict): Configuration block to render; may be a nested dict or a scalar value.
        fully_rendered_cfgs (dict): Accumulator dict for the rendered configuration.
        replacements (dict): Template variables currently available for substitution.
Returns:
dict: Configurations with template vars replaced
"""
try:
        for key, value in cfg.items():
replacements.update(cfg)
fully_rendered_cfgs[key] = {}
fully_rendered_cfgs[key] = nested_render(
value,
fully_rendered_cfgs=fully_rendered_cfgs[key],
replacements=replacements,
)
# new namespace, remove current vars (no longer available as replacements)
        for old_cfg, value in cfg.items():
replacements.pop(old_cfg, None)
return fully_rendered_cfgs
except AttributeError:
# it's not a dict, so lets try to render it. But only if it's
# truthy (which means there's actually something to replace)
if cfg:
t = Template(str(cfg))
rendered_value = t.render(**replacements)
try:
cfg = yaml_load(rendered_value)
except ScannerError:
# it's not loading into yaml, so let's assume it's a string with special
# chars such as: {}[],&*#?|:-<>=!%@\)
#
# in YAML, we have to "quote" a string with special chars.
#
# since yaml_load isn't loading from a file, we need to wrap the Python
# str in actual quotes.
cfg = yaml_load('"{}"'.format(rendered_value))
return cfg | 9958e792ef09aa7c88e4c8b7d29a61a8713927a2 | 8,225 |
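`nested_render` assumes a jinja2-style `Template` and a YAML loader that are not imported in this entry. The sketch below illustrates the namespace behaviour described in its docstring using jinja2 directly, under that assumption:

```python
from jinja2 import Template

replacements = {"BASE_URL": "http://localhost/user", "api_base_url": "http://other_fence/user"}
rendered = Template("{{api_base_url}}/oauth2/authorize").render(**replacements)
assert rendered == "http://other_fence/user/oauth2/authorize"

# An undefined variable renders as an empty string, which is why the docstring's
# THIS_WONT_WORK example collapses to "/test".
assert Template("{{missing}}/test").render() == "/test"
```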
from operator import sub
import warnings
def element_by_atomic_number(atomic_number):
"""Search for an element by its atomic number
Look up an element from a list of known elements by atomic number.
Return None if no match found.
Parameters
----------
atomic_number : int
Element atomic number that need to look for
if a string is provided, only numbers are considered during the search
Returns
-------
matched_element : element.Element
Return an element from the periodic table if we find a match,
otherwise raise GMSOError
"""
if isinstance(atomic_number, str):
atomic_number_trimmed = int(sub('[a-z -]', '', atomic_number.lower()).lstrip('0'))
        msg = ('Letters and spaces are not considered when searching by element atomic number. \n'
               '{} became {}'.format(atomic_number, atomic_number_trimmed))
warnings.warn(msg)
else:
atomic_number_trimmed = atomic_number
matched_element = atomic_dict.get(atomic_number_trimmed)
if matched_element is None:
raise GMSOError(f'Failed to find an element with atomic number {atomic_number_trimmed}')
return matched_element | 42a23d0bd2ce1391a74ee8b5d5f97aa5fc8b2d3f | 8,226 |
from tqdm import tqdm
def get_data_from_db(cursor):
"""
Get data from the database given a query-instantiated cursor
:param cursor: query-instantiated database cursor
:return: tuple of labels and training data
"""
training_data, labels = [], []
cols = [desc[0] for desc in cursor.description]
for record in tqdm(cursor, total=cursor.rowcount):
record = dict(record)
record['purposes'] = [purpose_to_english[p] for p in record['purposes']]
# just duplicate for house_number and year of construction
record['house_number_vec'] = record['house_number']
record['year_of_construction_vec'] = record['year_of_construction']
# one-hot encoding for house number addition
if record['house_number_addition']:
hna = np.zeros(shape=(len(record['house_number_addition']), len(VOCABULARY)))
for idx, char in enumerate(record['house_number_addition']):
hna[idx, VOCABULARY.index(char.lower())] = 1.
else:
hna = np.zeros(shape=(1, len(VOCABULARY)))
record['house_number_addition_vec'] = hna
# 'multi-hot' encoding for building purposes
purposes = np.zeros(shape=(len(PURPOSES,)))
for purpose in record['purposes']:
purposes[PURPOSES.index(purpose)] = 1.
record['purposes_vec'] = purposes
# character-level vectorization of postal code
pc = np.zeros((len(record['postal_code']), len(VOCABULARY)))
for idx, char in enumerate(record['postal_code']):
pc[idx, VOCABULARY.index(char.lower())] = 1.
record['postal_code_vec'] = pc
# building geometry vectorization
geom = record['geometry_crs84']
geom = vectorize_wkt(geom)
record['geometry_vec'] = geom
record['centroid_vec'] = vectorize_wkt(record['centroid_crs84'])[0, :2]
# vectorization of neighbouring buildings
neighbours = record['neighbouring_buildings_crs84']
neighbours = vectorize_wkt(neighbours)
record['neighbouring_buildings_vec'] = neighbours
rd = record['recorded_date']
record['recorded_date_vec'] = [rd.year, rd.month, rd.day, rd.weekday()]
rgd = record['registration_date']
record['registration_date_vec'] = [rgd.year, rgd.month, rgd.day, rgd.weekday()]
training_data.append(record)
labels.append({
'energy_performance_index': record['energy_performance_index'],
'energy_performance_label': record['energy_performance_label'],
'energy_performance_vec': ENERGY_CLASSES.index(record['energy_performance_label'])
})
return training_data, labels | a3db9af7912fd38e9966f0c95639613cb4dac087 | 8,227 |
def parse(input_str, file_path=True):
"""
Parse a GLM into an omf.feeder tree. This is so we can walk the tree,
change things in bulk, etc.
Input can be a file path or GLM string.
"""
tokens = _tokenize_glm(input_str, file_path)
return _parse_token_list(tokens) | 2e53a6870baae3fa9bfb511b28baf73e697b44a0 | 8,228 |
import numpy as np
def pred(model, x_pred_scaled, scaler_y):
"""
Predict
:param model: model for prediction
:param x_pred_scaled: scaled x values we need to predict for
:param scaler_y: scaler for y values
:return:
"""
MAX_PREDICT_SIZE = 10000
g_mean_full = g_std_full = None
start = 0
while start < len(x_pred_scaled):
end = start + MAX_PREDICT_SIZE
x_pred_scaled_slice = x_pred_scaled[start:end]
g_mean_scaled, g_std_scaled = model_gpflow.predict_gpflow(model, x_pred_scaled_slice)
g_mean = scaler_y.inverse_transform(g_mean_scaled)
g_std = g_std_scaled * scaler_y.scale_
if g_mean_full is None:
g_mean_full = g_mean
g_std_full = g_std
else:
g_mean_full = np.vstack((g_mean_full, g_mean))
g_std_full = np.vstack((g_std_full, g_std))
start = end
return g_mean_full, g_std_full | 7a43020a2b4817b3287c849bc0059027346bf5a5 | 8,229 |
from xml.etree import ElementTree
def metadataAbstractElementEmptyValuesTest3():
"""
Empty value for unknown attribute.
>>> doctestMetadataAbstractElementFunction(
... testMetadataAbstractElementEmptyValue,
... metadataAbstractElementEmptyValuesTest3(),
... requiredAttributes=["required1"],
... optionalAttributes=["optional1"])
[]
"""
metadata = """<?xml version="1.0" encoding="UTF-8"?>
<test required1="foo" optional1="foo" unknown1="" />
"""
return ElementTree.fromstring(metadata) | 4f9e0d33948a9ac1ab35bc9cf0c5c3274cfc9d41 | 8,230 |
import numpy as np
def orthonormal_initializer(input_size, output_size):
"""from https://github.com/patverga/bran/blob/32378da8ac339393d9faa2ff2d50ccb3b379e9a2/src/tf_utils.py#L154"""
I = np.eye(output_size)
lr = .1
eps = .05/(output_size + input_size)
success = False
tries = 0
while not success and tries < 10:
Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
for i in range(100):
QTQmI = Q.T.dot(Q) - I
loss = np.sum(QTQmI**2 / 2)
Q2 = Q**2
Q -= lr*Q.dot(QTQmI) / (np.abs(Q2 + Q2.sum(axis=0,
keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
tries += 1
lr /= 2
break
success = True
if success:
print('Orthogonal pretrainer loss: %.2e' % loss)
else:
print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
return Q.astype(np.float32) | 11cc28d6342ed20699c96051a36199c3b8941381 | 8,231 |
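A quick numerical sanity check (illustrative sizes) that the matrix returned by `orthonormal_initializer` has roughly orthonormal columns when the pretrainer converges:

```python
import numpy as np

Q = orthonormal_initializer(64, 32)
assert Q.shape == (64, 32) and Q.dtype == np.float32
# Largest deviation of Q^T Q from the identity; small when pretraining succeeded.
print(np.abs(Q.T @ Q - np.eye(32, dtype=np.float32)).max())
```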
def stick_figure (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" General function for drawing stick based parts (e.g., ribozyme and protease sites).
"""
# Default options
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 5.0
y_extent = 10.0
linestyle = '-'
linetype = "";
shapetype = "";
if(type == "Ribozyme"):
linetype = 'dash'
headgroup = 'O'
elif(type == "Protease"):
linetype = 'dash'
headgroup = 'X'
elif(type == "ProteinStability"):
linetype = 'solid'
headgroup = 'O'
elif(type == "Ribonuclease"):
linetype = 'solid'
headgroup = 'X'
# Reset defaults if provided
if opts != None:
if 'color' in opts.keys():
color = opts['color']
if 'start_pad' in opts.keys():
start_pad = opts['start_pad']
if 'end_pad' in opts.keys():
end_pad = opts['end_pad']
if 'x_extent' in opts.keys():
x_extent = opts['x_extent']
if 'y_extent' in opts.keys():
y_extent = opts['y_extent']
if 'linestyle' in opts.keys():
linestyle = opts['linestyle']
if 'linewidth' in opts.keys():
linewidth = opts['linewidth']
if 'scale' in opts.keys():
scale = opts['scale']
# Check direction add start padding
final_end = end
final_start = prev_end
if start > end:
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
rbs_center = (end+((start-end)/2.0),-y_extent)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=8)
x1 = Line2D([start,end],[-y_extent*1.25,-y_extent/1.5],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
x2 = Line2D([start,end],[-y_extent/1.5,-y_extent*1.25],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
dash1 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent/4],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
dash2 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[-y_extent/2,-y_extent+(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidO = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent+(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidX = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
if(headgroup == "O" and linetype == "dash"):
ax.add_patch(c1)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "X" and linetype == "dash"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "O" and linetype == "solid"):
ax.add_patch(c1)
ax.add_line(solidO)
elif(headgroup == "X" and linetype == "solid"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(solidX)
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
rbs_center = (start+((end-start)/2.0),y_extent)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=8)
x1 = Line2D([start,end],[y_extent*1.25,y_extent/1.5],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
x2 = Line2D([start,end],[y_extent/1.5,y_extent*1.25],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
dash1 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent/4],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
dash2 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[y_extent/2,y_extent-(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidO = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent-(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidX = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
if(headgroup == 'O' and linetype == 'dash'):
ax.add_patch(c1)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "X" and linetype == "dash"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "O" and linetype == "solid"):
ax.add_patch(c1)
ax.add_line(solidO)
elif(headgroup == "X" and linetype == "solid"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(solidX)
if opts != None and 'label' in opts.keys():
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end | 68ae3194ad1dd38e0e18f8da3b0b5f1b0e22071a | 8,232 |
def display_datetime(datetime_str, time_zone=None, verbose=True):
"""Returns a formatted datetime with TZ (if provided) or 'Error (Missing)"""
"""
>>> print(datetime.datetime.utcnow().strftime("%Y/%m/%d %a %I:%M %p"))
2019/05/19 Sun 01:10 AM
"""
if datetime_str: # and type(datetime_str) == datetime.datetime.now():
if verbose:
return f'{datetime_str.strftime("%Y/%m/%d %a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
else:
return f'{datetime_str.strftime("%a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
else:
return 'Error (Missing)' | 45caa488688e790ae19f8f3f2cda2cb0f250b1fd | 8,233 |
import torch
def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
"""
Creates an autoregressive channel mask.
Input:
mask_type: str
Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
in_channels: int
Number of input channels to layer.
out_channels: int
Number of output channels of layer.
data_channels: int
Number of channels in the input data, e.g. 3 for RGB images. (default = 3).
Output:
mask: torch.FloatTensor
Shape (out_channels, in_channels).
A mask with 0 in places for masked elements.
"""
in_factor = in_channels // data_channels + 1
out_factor = out_channels // data_channels + 1
base_mask = torch.ones([data_channels,data_channels])
if mask_type == 'A':
base_mask = base_mask.tril(-1)
else:
base_mask = base_mask.tril(0)
mask_p1 = torch.cat([base_mask]*in_factor, dim=1)
mask_p2 = torch.cat([mask_p1]*out_factor, dim=0)
mask = mask_p2[0:out_channels,0:in_channels]
return mask | 772fa71f63d2f31c80966db0b0eb43a70ac5e9a9 | 8,234 |
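An illustrative look at the type-'A' mask for 3 data channels, 3 input channels and 6 output channels (sizes chosen arbitrarily); each output channel may only see strictly earlier data channels:

```python
import torch

mask = mask_channels('A', in_channels=3, out_channels=6, data_channels=3)
print(mask.shape)  # torch.Size([6, 3])
print(mask)
# tensor([[0., 0., 0.],
#         [1., 0., 0.],
#         [1., 1., 0.],
#         [0., 0., 0.],
#         [1., 0., 0.],
#         [1., 1., 0.]])
```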
import textwrap
def dedent(text):
"""
Remove all common indentation from every line but the 0th.
This will avoid getting <code> blocks when rendering text via markdown.
Ignoring the 0th line will also allow the 0th line not to be aligned.
Args:
text: A string of text to dedent.
Returns:
String dedented by above rules.
For example:
assertEquals("bar\nline1\nline2", dedent("bar\n line1\n line2"))
assertEquals("bar\nline1\nline2", dedent(" bar\n line1\n line2"))
assertEquals("bar\n line1\nline2", dedent(" bar\n line1\n line2"))
"""
text = textwrap.dedent(text)
text_lines = text.split('\n')
text_not_first = "\n".join(text_lines[1:])
text_not_first = textwrap.dedent(text_not_first)
text = text_lines[0] + "\n" + text_not_first
return text | b450a873c4c2b667d10c66985d19f8057aa205f9 | 8,235 |
from io import TextIOWrapper
from sys import stderr
def open_text_file_for_write(output_directory:str, file_name:str, verbose=False) -> TextIOWrapper:
"""Open a text file for writing"""
if verbose:
print(f"opening text file for write: {output_directory}/{file_name}", file=stderr)
return open(f"{output_directory}/{file_name}", 'w', encoding='utf-8') | e6e2a379f95c54ebec3f2e1d3a4c446ee169d2e7 | 8,236 |
import numpy as np
def dpsplit(n,k, sig):
""" Perform the dynamic programming optimal segmentation, using the sig function
to determine the cost of a segment sig(i,j) is the cost of the i,j segment. These
are then added together
"""
# Set up the tracking tables
K = k + 1
N = n
segtable = np.zeros((n,K)) + np.nan
    segtable[:,0] = [ sig(0,j+1) for j in range(N) ]
segindtable = np.zeros((N,K), dtype='int') - 1
# fill up the table in a clever order
    for k in range(1,K):
        for j in range(k,N):
            #fill the j,k element
            ans = min( ( (segtable[l,k-1] + sig(l+1,j+1), l+1 )
                         for l in range(k-1,j) ) )
segtable[j,k] = ans[0]
segindtable[j,k] = ans[1]
# read out the path
current_pointer = segindtable[-1,K-1]
path = [current_pointer]
    for k in range(K-2, 0, -1):
current_pointer = segindtable[current_pointer-1, k]
path.append(current_pointer)
return sorted(path + [N]), segtable[-1,K-1] | db1513ae0a4725b63e62b102dc2c5fdd77fd4ceb | 8,237 |
from typing import List
def get_wind_tiles() -> List[Tile]:
"""return a list of four wind tiles
"""
return [Tile(Suit.JIHAI.value, Jihai.TON.value),
Tile(Suit.JIHAI.value, Jihai.NAN.value),
Tile(Suit.JIHAI.value, Jihai.SHAA.value),
Tile(Suit.JIHAI.value, Jihai.PEI.value)] | 469ec29795291bb8345fa3beccbbf2c6d4bb3101 | 8,238 |
import torch
import os
from torch import optim
def train_mnist_classifier(lr=0.001, epochs=50, model_dir='.'):
"""train mnist classifier for inception score"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {0!s}".format(device))
train_loader = load_mnist(batchSize=100, train=True)
test_loader = load_mnist(batchSize=100, train=False)
model = LeNet().to(device)
def evaluate():
model.eval()
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
pred = output.argmax(dim=1)
correct += pred.eq(target).sum().item()
accuracy = 100. * correct / len(test_loader.dataset)
return accuracy
train_criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# training loop
print('Started training...')
best_test_acc = 0.0
best_test_epoch = 0
for epoch in range(1, epochs + 1):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data).squeeze(1)
loss = train_criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % 20 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
test_acc = evaluate()
print('Test Accuracy: {:.2f}\n'.format(test_acc))
if test_acc > best_test_acc:
best_test_epoch = epoch
best_test_acc = test_acc
torch.save(model.state_dict(), os.path.join(model_dir, "mnist_classifier.pt"))
print('Finished.')
print('Best: Epoch: {}, Test-Accuracy: {:.4f}\n'.format(best_test_epoch, best_test_acc)) | 91dbfaeae5e4ef5f34435c7034a7a06f980b1b3b | 8,239 |
import datetime
def datetime_now_filename_string():
"""create a string representation for now() for use as part of the MHL filename"""
return datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d_%H%M%S") | 37a733ddd93ca1bc4eed82e920222c644c494fcd | 8,240 |
from pathlib import Path
import inspect
import zarr
from tqdm import tqdm
def generate_simulation_dataset(path, runs, **kawrgs):
"""Generate and save a simulation dataset.
Parameters
----------
path : str
Root path where simulation data will be stored.
runs : int, array
If int then number of runs to use. If array then
array must be of one dim more than simulation grid
dim.
kawrgs :
run_multiple_sources kwargs.
Returns
-------
dataset : zarr.hierarchy.Group
Simulation dataset.
"""
# Convert path to pathlib path
path = Path(path)
# Create dataset
dataset = zarr.open(path.as_posix(), mode='w')
if not isinstance(runs, int):
full_speed_array = runs
runs = len(runs)
else:
full_speed_array = None
# Add dataset attributes
dataset.attrs['waver'] = True
dataset.attrs['dataset'] = True
dataset.attrs['runs'] = runs
# Add simulation attributes based on kwargs and defaults
parameters = inspect.signature(run_multiple_sources).parameters
for param, value in parameters.items():
if param in kawrgs:
dataset.attrs[param] = kawrgs[param]
else:
dataset.attrs[param] = value.default
# Initialize speed and wave arrays
speed_array = None
wave_array = None
# Move through runs
for run in tqdm(range(runs), leave=False):
if full_speed_array is not None:
kawrgs['speed'] = full_speed_array[run]
wave, speed = run_multiple_sources(**kawrgs)
if speed_array is None:
speed_array = dataset.zeros('speed', shape=(runs, ) + speed.shape, chunks=(1,) + (64,) * speed.ndim)
if wave_array is None:
wave_array = dataset.zeros('wave', shape=(runs, ) + wave.shape, chunks=(1,) + (64,) * wave.ndim)
speed_array[run] = speed
wave_array[run] = wave
return dataset | 8383f42cfe2604e5f82761bb351b6cf4f16f33aa | 8,241 |
def num_sites(sequence, rule, **kwargs):
"""Count the number of sites where `sequence` can be cleaved using
the given `rule` (e.g. number of miscleavages for a peptide).
Parameters
----------
sequence : str
The sequence of a polypeptide.
rule : str or compiled regex
A regular expression describing the site of cleavage. It is recommended
to design the regex so that it matches only the residue whose
C-terminal bond is to be cleaved. All additional requirements should be
specified using `lookaround assertions
<http://www.regular-expressions.info/lookaround.html>`_.
labels : list, optional
A list of allowed labels for amino acids and terminal modifications.
Returns
-------
out : int
Number of cleavage sites.
"""
return len(_cleave(sequence, rule, **kwargs)) - 1 | dc0840e33206c9db7058a7257a60c59bee0403f8 | 8,242 |
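`num_sites` delegates to a `_cleave` helper that is not shown here. As a rough, hypothetical illustration of the rule-regex idea only (not the library's implementation), a trypsin-style rule with a lookahead can be counted directly:

```python
import re

# Cleave after K or R unless followed by P (illustrative rule and peptide).
rule = re.compile(r'[KR](?!P)')
print(len(rule.findall('AKRPMKLV')))  # 2 -> two cleavage sites
```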
from collections import defaultdict
from dis import get_instructions
def get_packages(code: str) -> defaultdict:
"""Extracts the packages that were included in the file being inspected.
Source for this code: https://stackoverflow.com/questions/2572582/
Example:
input:
'from collections import Counter\n
import kivy\n
from stats import median as stats_median\n'
output:
defaultdict(<class 'list'>,
{'import_name': ['collections', 'kivy', 'stats'],
'import_from': ['Counter', 'median']}
)
"""
instructions = get_instructions(code)
import_instructions = [i for i in instructions if "IMPORT" in i.opname]
imports = defaultdict(list)
for instr in import_instructions:
imports[instr.opname.lower()].append(instr.argval)
return imports | 977f20e2d3c12993ef26ff8b199f943fb153c79b | 8,243 |
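A short usage check mirroring the docstring example, assuming `get_instructions` comes from the standard `dis` module and `defaultdict` from `collections`:

```python
imports = get_packages("from collections import Counter\nimport os\n")
assert imports["import_name"] == ["collections", "os"]
assert imports["import_from"] == ["Counter"]
```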
def beam_name():
"""Session level fixture for beam path."""
return str(beam_path) | a702d91c62024685d14125123ead41a2a4e38942 | 8,244 |
import itertools
import jax
import jax.numpy as jnp
def get_mv_sandwich(a_blade_indices, b_blade_indices, signature, prod="gp"):
"""a b ~a"""
out_indices = []
out_blade_indices = []
out_signs = []
out_indices = []
indices_a = []
indices_b = []
indices_a_r = []
blade_to_index = {}
for (i_a, index_a), (i_b, index_b), (i_a_r, index_a_r) in itertools.product(
enumerate(a_blade_indices),
enumerate(b_blade_indices),
enumerate(reverse_indices(a_blade_indices))
):
out_sign_1, out_index_1 = reduce_bases(index_a, index_b, signature)
out_sign_2, out_index = reduce_bases(out_index_1, index_a_r, signature)
out_sign = out_sign_1 * out_sign_2
if out_sign != 0 and (
prod == "gp" or
(prod == "op" and len(out_index) == abs(len(index_a) + len(index_b))) or
(prod == "ip" and len(out_index) == abs(len(index_a) - len(index_b)))
):
out_signs.append(out_sign)
indices_a.append(i_a)
indices_b.append(i_b)
indices_a_r.append(i_a_r)
if out_index in blade_to_index:
out_indices.append(blade_to_index[out_index])
else:
blade_to_index[out_index] = len(blade_to_index)
out_indices.append(blade_to_index[out_index])
out_blade_indices.append(out_index)
if len(out_indices) == 0:
def _values_mv_sandwich(a_values, b_values):
return jnp.zeros((), dtype=jnp.float32)
else:
out_size = max(out_indices) + 1
def _values_mv_sandwich(a_values, b_values):
out_batch_shape = jnp.broadcast_shapes(
a_values.shape[1:], b_values.shape[1:]
)
out_values = jnp.zeros(
[out_size, *out_batch_shape], dtype=jnp.float32
)
for index_a, index_b, index_a_r, out_sign, out_index in zip(indices_a, indices_b, indices_a_r, out_signs, out_indices):
out_values = out_values.at[out_index].add(
out_sign * a_values[index_a] * b_values[index_b] * a_values[index_a_r]
)
return out_values
_values_mv_sandwich_jit = jax.jit(_values_mv_sandwich)
return _values_mv_sandwich_jit, tuple(out_blade_indices) | 8e30f31de944bd6aa19e60fe7a93aceb8ccb73ef | 8,245 |
def ldns_dnssec_create_nsec3(*args):
"""LDNS buffer."""
return _ldns.ldns_dnssec_create_nsec3(*args) | 653d899f7d30e1e272c0a7026e4383e191b3e78f | 8,246 |
def S_difference_values(_data_lista, _data_listb):
"""
    Returns the element-wise difference of the two data lists (an empty list if their lengths differ).
"""
d_data = []
dsa = len(_data_lista)
dsb = len(_data_listb)
if dsa != dsb:
return []
for i in range(dsa):
d_data.append(_data_lista[i] - _data_listb[i])
return d_data | 40ec82cb7ef53d5e227b3287a9c1d08e78112e09 | 8,247 |
def parseFixedZone(s):
"""Convert a +hhmm or -hhmm zone suffix.
[ s is a string ->
if s is a time zone suffix of the form "+hhmm" or "-hhmm" ->
return that zone information as an instance of a class
that inherits from datetime.tzinfo
else -> raise SyntaxError ]
"""
#-- 1 --
if s.startswith('+'): sign = 1
elif s.startswith('-'): sign = -1
else:
raise SyntaxError("Expecting zone modifier as {0}hhmm: "
"'{1}'".format(s[0], s))
#-- 2 --
# [ if s[1:] matches HHMM_PAT ->
# hours := the HH part as an int
# minutes := the MM part as an int
# else -> raise SyntaxError ]
rawHHMM = s[1:]
m = HHMM_PAT.match(rawHHMM)
if m is None:
raise SyntaxError("Expecting zone modifier as {0}HHMM: "
"'{1}'".format(s[0], s))
else:
hours = int(rawHHMM[:2])
minutes = int(rawHHMM[2:])
#-- 3 --
return FixedZone(sign*hours, sign*minutes, s) | 681b5ad02f228ee40b099a461131de42309e58f0 | 8,248 |
def tica_eigenvalues_plot(tica, num=12, plot_file=None):
"""
Plots the highest eigenvalues over the number of the time-lagged independent components.
Parameters
----------
tica : TICA obj
Time-lagged independent components information.
num : int, default = 12
Number of eigenvalues to plot.
plot_file : str, optional, default = None
Path and name of the file to save the plot.
"""
# Plot eigenvalues over component numbers.
fig,ax = plt.subplots(1, 1, figsize=[4,3], dpi=300)
componentnr = np.arange(num)+1
eigenvalues = tica.eigenvalues[:num]
ax.bar(componentnr, eigenvalues)
ax.set_xlabel('component number')
ax.set_ylabel('eigenvalue')
fig.tight_layout()
# Save the figure to a file.
if plot_file: fig.savefig(plot_file, dpi=300)
return componentnr, eigenvalues | 707ebbacf0dc90e96760ae26d316da4c27bdd997 | 8,249 |
from lxml import etree
import random, string
import os
from tempfile import TemporaryDirectory
import subprocess
import shlex
import re
def latex2svg(code, params=default_params, working_directory=None):
"""Convert LaTeX to SVG using dvisvgm and scour (or svgo).
Parameters
----------
code : str
LaTeX code to render.
params : dict
Conversion parameters.
working_directory : str or None
Working directory for external commands and place for temporary files.
Returns
-------
dict
Dictionary of SVG output and output information:
* `svg`: SVG data
* `width`: image width in *em*
* `height`: image height in *em*
* `valign`: baseline offset in *em*
"""
if working_directory is None:
with TemporaryDirectory() as tmpdir:
return latex2svg(code, params, working_directory=tmpdir)
# Caution: TeX & dvisvgm work with TeX pt (1/72.27"), but we need DTP pt (1/72")
# so we need a scaling factor for correct output sizes
# dvisvgm will produce a viewBox in DTP pt but SHOW TeX pt in its output.
scaling = 1.00375 # (1/72)/(1/72.27)
fontsize = params['fontsize']
document = (params['template']
.replace('{{ preamble }}', params['preamble'])
.replace('{{ fontsize }}', str(fontsize))
.replace('{{ code }}', code))
with open(os.path.join(working_directory, 'code.tex'), 'w') as f:
f.write(document)
# Run LaTeX and create DVI file
try:
ret = subprocess.run(shlex.split(params['latex_cmd']+' code.tex'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('latex not found')
# Add LIBGS to environment if supplied
env = os.environ.copy()
if params['libgs']:
env['LIBGS'] = params['libgs']
# Convert DVI to SVG
dvisvgm_cmd = params['dvisvgm_cmd'] + ' --scale=%f' % params['scale']
dvisvgm_cmd += ' code.dvi'
try:
ret = subprocess.run(shlex.split(dvisvgm_cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('dvisvgm not found')
# Parse dvisvgm output for size and alignment
def get_size(output):
regex = r'\b([0-9.]+)pt x ([0-9.]+)pt'
match = re.search(regex, output)
if match:
return (float(match.group(1)) / fontsize * scaling,
float(match.group(2)) / fontsize * scaling)
else:
return None, None
def get_measure(output, name):
regex = r'\b%s=([0-9.e-]+)pt' % name
match = re.search(regex, output)
if match:
return float(match.group(1)) / fontsize * scaling
else:
return None
output = ret.stderr.decode('utf-8')
width, height = get_size(output)
depth = get_measure(output, 'depth')
# no baseline offset if depth not found
if depth is None:
depth = 0.0
# Modify SVG attributes, to a get a self-contained, scaling SVG
# read SVG, discarding all comments ("<-- Generated by… -->")
parser = etree.XMLParser(remove_comments=True)
xml = etree.parse(os.path.join(working_directory, 'code.svg'), parser)
svg = xml.getroot()
svg.set('width', f'{width:.6f}em')
svg.set('height', f'{height:.6f}em')
svg.set('style', f'vertical-align:{-depth:.6f}em')
xml.write(os.path.join(working_directory, 'code.svg'))
# Run optimizer to get a minified oneliner with (pseudo-)unique Ids
# generate random prefix using ASCII letters (ID may not start with a digit)
prefix = ''.join(random.choice(string.ascii_letters) for n in range(3))
svgo_cmd = (params['svgo_cmd']
.replace('{{ infile }}', 'code.svg')
.replace('{{ outfile }}', 'optimized.svg'))
svgo_config = (params['svgo_config']
.replace('{{ prefix }}', prefix))
# with scour, input & output files must be different
scour_cmd = (params['scour_cmd']
.replace('{{ prefix }}', prefix+'_')
.replace('{{ infile }}', 'code.svg')
.replace('{{ outfile }}', 'optimized.svg'))
if params['optimizer'] == 'scour':
# optimize SVG using scour (default)
try:
ret = subprocess.run(shlex.split(scour_cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('scour not found')
with open(os.path.join(working_directory, 'optimized.svg'), 'r') as f:
svg = f.read()
elif params['optimizer'] == 'svgo':
# optimize SVG using svgo (optional)
# write svgo params file
with open(os.path.join(working_directory, 'svgo.config.js'), 'w') as f:
f.write(svgo_config)
try:
ret = subprocess.run(shlex.split(svgo_cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('svgo not found')
with open(os.path.join(working_directory, 'optimized.svg'), 'r') as f:
svg = f.read()
else:
# no optimization, just return SVG
with open(os.path.join(working_directory, 'code.svg'), 'r') as f:
svg = f.read()
return {'svg': svg, 'valign': round(-depth,6),
'width': round(width,6), 'height': round(height,6)} | 5bfc425d206d0d0ee61e6ef2bff7db061d5750c0 | 8,250 |
def _split_train_test(features, labels, train_set, random_seed):
"""Split the dataset into training and test sets.
Parameters
----------
features : pandas.DataFrame
Features of the dataset events.
labels : pandas.DataFrame
Labels of the dataset events.
train_set : {float, list-like}
If float, it is the fraction of objects that will be used as training
set. If list, it is the IDs of the objects to use as training set.
random_seed : {int, RandomState instance}
Random seed or random state instance to use. It allows reproducible
results.
Returns
-------
X_train : pandas.DataFrame
Features of the events with which to train the classifier.
X_test : pandas.DataFrame
Features of the events with which to test the classifier.
y_train : pandas.core.series.Series
Labels of the events with which to train the classifier.
y_test : pandas.core.series.Series
Labels of the events with which to test the classifier.
"""
if np.isscalar(train_set): # `train_set` was the size of training set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
features, labels, train_size=train_set,
random_state=random_seed)
else: # `train_set` was a list of object names
X_train = features.loc[train_set]
y_train = labels.loc[train_set]
is_not_train_set = ~ features.index.isin(train_set)
X_test = features[is_not_train_set]
y_test = labels[is_not_train_set]
return X_train, X_test, y_train, y_test | 67210e0462cdd4be58f5446b6220f921aa8c4ea0 | 8,251 |
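# Usage sketch for _split_train_test (illustrative data, not from the original project):
# a float keeps that fraction of rows for training, while a list of index labels
# would select those exact rows as the training set instead.
import pandas as pd
_features = pd.DataFrame({"x": range(8)}, index=list("abcdefgh"))
_labels = pd.DataFrame({"label": range(8)}, index=list("abcdefgh"))
_X_train, _X_test, _y_train, _y_test = _split_train_test(_features, _labels, 0.75, 0)
print(len(_X_train), len(_X_test))  # 6 2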
from sideboard.lib._services import _register_rpc_services
import os
from copy import deepcopy
from os import unlink
from tempfile import NamedTemporaryFile
import configobj
from validate import Validator
def parse_config(requesting_file_path, is_plugin=True):
"""
Parse the config files for a given sideboard plugin, or sideboard itself.
It's expected that this function is called from one of the files in the
top-level of your module (typically the __init__.py file)
Args:
requesting_file_path (str): The __file__ of the module requesting the
parsed config file. An example value is::
/opt/sideboard/plugins/plugin-package-name/plugin_module_name/__init__.py
the containing directory (here, `plugin_module_name`) is assumed
to be the module name of the plugin that is requesting a parsed
config.
is_plugin (bool): Indicates whether a plugin is making the request or
Sideboard itself is making the request. If True (default) add
plugin-relevant information to the returned config. Also, treat it
as if it's a plugin
Returns:
ConfigObj: The resulting configuration object.
"""
module_dir, root_dir = get_module_and_root_dirs(requesting_file_path, is_plugin)
specfile = os.path.join(module_dir, 'configspec.ini')
spec = configobj.ConfigObj(specfile, interpolation=False, list_values=False, encoding='utf-8', _inspec=True)
# to allow more/better interpolations
root_conf = ['root = "{}"\n'.format(root_dir), 'module_root = "{}"\n'.format(module_dir)]
temp_config = configobj.ConfigObj(root_conf, interpolation=False, encoding='utf-8')
for config_path in get_config_files(requesting_file_path, is_plugin):
# this gracefully handles nonexistent files
temp_config.merge(configobj.ConfigObj(config_path, encoding='utf-8', interpolation=False))
# combining the merge files to one file helps configspecs with interpolation
with NamedTemporaryFile(delete=False) as config_outfile:
temp_config.write(config_outfile)
temp_name = config_outfile.name
config = configobj.ConfigObj(temp_name, encoding='utf-8', configspec=spec)
validation = config.validate(Validator(), preserve_errors=True)
unlink(temp_name)
if validation is not True:
raise ConfigurationError('configuration validation error(s) (): {!r}'.format(
configobj.flatten_errors(config, validation))
)
if is_plugin:
sideboard_config = globals()['config']
config['plugins'] = deepcopy(sideboard_config['plugins'])
if 'rpc_services' in config:
_register_rpc_services(config['rpc_services'])
if 'default_url' in config:
priority = config.get('default_url_priority', 0)
if priority >= sideboard_config['default_url_priority']:
sideboard_config['default_url'] = config['default_url']
return config | d8cfd06356d8f20040646b48da7c236e71db90fa | 8,252 |
from nacl.public import PrivateKey
def generate_enc_keypair():
"""
Generate Curve25519 keypair
:returns tuple: A byte pair containing the encryption key and decryption
key.
"""
private_key = PrivateKey.generate()
return private_key.public_key.encode(), private_key.encode() | c2ba00b6463d7ab1708dcc43d9cafba2d11af0c6 | 8,253 |
from typing import List
from typing import Tuple
def filter_molecular_components(
components: List[Component],
) -> Tuple[List[Component], List[Component]]:
"""Separate list of components into molecular and non-molecular components.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
Returns:
The filtered components as a tuple of ``(molecular_components,
other_components)``.
"""
molecular_components = [c for c in components if c["dimensionality"] == 0]
other_components = [c for c in components if c["dimensionality"] != 0]
return molecular_components, other_components | 72a43a5195ef3d35ca8216225dcae7f699c7bbd5 | 8,254 |
import types
def argument(name, type):
"""
Set the type of a command argument at runtime. This is useful for more
specific types such as mitmproxy.types.Choice, which we cannot annotate
directly as mypy does not like that.
"""
def decorator(f: types.FunctionType) -> types.FunctionType:
assert name in f.__annotations__
f.__annotations__[name] = type
return f
return decorator | 8f93c8e8cd4289d2b4747feb93ecfe3df74350f7 | 8,255 |
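# Usage sketch: retype an annotated parameter at runtime without touching the
# function signature (the function and annotation below are illustrative only).
@argument("path", str)
def _open_resource(path: "retyped-at-runtime") -> None:
    ...
print(_open_resource.__annotations__["path"])  # <class 'str'>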
from aiohttp.hdrs import AUTHORIZATION
async def async_google_actions_request_sync(cloud):
"""Request a Google Actions sync request."""
return await cloud.websession.post(
f"{cloud.google_actions_report_state_url}/request_sync",
headers={AUTHORIZATION: f"Bearer {cloud.id_token}"},
) | 5d75e4b67bc04878108066660b0f43939a1eab4e | 8,256 |
def convert_time_string_to_secs(string: str) -> int:
"""
Takes a string in the format '1h30m25s' and converts it to an integer
in seconds. This functions uses the regular expression RE_CONVERT_TIME
above for matching the string.
"""
match = regexp_time.match(string)
if not match:
raise ValueError("String {0} has an invalid representation")
h, m, s, ms, us = match.groups()
h = int(h) if h else 0
m = int(m) if m else 0
s = int(float(s)) if s else 0
total_time_seconds = h*3600 + m*60 + s
return total_time_seconds | b520fde06640cd3d22a6c619031633bf21383687 | 8,257 |
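# A minimal sketch of a regex compatible with the five groups unpacked above
# (hours, minutes, seconds, milliseconds, microseconds). The real pattern
# (RE_CONVERT_TIME / regexp_time) lives elsewhere in the module, so this is an
# assumption for illustration only.
import re
regexp_time = re.compile(
    r'^(?:(\d+)h)?(?:(\d+)m)?(?:(\d+(?:\.\d+)?)s)?(?:(\d+)ms)?(?:(\d+)us)?$'
)
# With such a pattern, convert_time_string_to_secs('1h30m25s') returns 5425.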
def polar_cube(c, index, n=512, interp='cubic'):
"""VIMS cube polar projected.
Parameters
----------
c: pyvims.VIMS
Cube to interpolate.
index: int, float, str, list, tuple
VIMS band or wavelength to plot.
n: int, optional
Number of pixel for the grid interpolation.
interp: str, optional
Interpolation method
"""
# Pixel data
data = c[index]
# Choose which pole to display
n_pole = c.sc_lat > 0
# Pixel positions in polar projection
pixels = polar_proj(c.ground_lon, c.ground_lat, n_pole=n_pole)
# Contour positions in polar projection
contour = polar_proj(*c.clonlat, n_pole=n_pole)
# Interpolate data (with mask)
z, grid, extent = polar_interp(pixels, data, contour, n=n, method=interp)
return z, grid, extent, pixels, contour, n_pole | 6a02932e8685a1cdd43e6831b2a3544bd903a40b | 8,258 |
from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter
import gzip
from distutils.version import LooseVersion
import nibabel
from nibabel import gifti
def _load_surf_files_gifti_gzip(surf_file):
"""Load surface data Gifti files which are gzipped. This
function is used by load_surf_mesh and load_surf_data for
extracting gzipped files.
Part of the code can be removed while bumping nibabel 2.0.2
"""
with gzip.open(surf_file) as f:
as_bytes = f.read()
if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
parser = gifti.GiftiImage.parser()
parser.parse(as_bytes)
gifti_img = parser.img
else:
parser = ParserCreate()
parser.buffer_text = True
out = Outputter()
parser.StartElementHandler = out.StartElementHandler
parser.EndElementHandler = out.EndElementHandler
parser.CharacterDataHandler = out.CharacterDataHandler
parser.Parse(as_bytes)
gifti_img = out.img
return gifti_img | 2fdc083a208f1a288b7d99bd3863bff22d36bf50 | 8,259 |
import platform
def get_platform_system():
"""return platform.system
platform module has many regexp, so importing it is slow...
import only if required
"""
return platform.system() | 2531f1883d5acd0c192c0061d7cbf29637197706 | 8,260 |
import sqlparse
import sqlparse.sql
import sqlparse.tokens
def populate_sql(
sql: sqlparse.sql.Statement, example: NLToSQLExample, anonymize_values: bool
) -> bool:
"""
Creates a sequence of output / decoder actions from a raw SQL query.
Args:
sql: The SQL query to convert.
example: The NLToSQLExample object to add output actions.
anonymize_values: Whether to anonymize values by replacing with a placeholder.
Raises:
ParseError: if the SQL query can't be parsed.
Returns:
Boolean indicating whether all actions copying values from the input utterance were successfully completed.
"""
successful_copy = True
for item in sql:
if item.ttype == sqlparse.tokens.Text.Whitespace:
continue
if _is_punctuation(item) and (item.value in ("(", ")")):
_add_simple_step(item, example)
continue
if _is_punctuation(item) and (item.value in (",",)):
_add_simple_step(item, example)
continue
if _is_parenthesis(item):
successful_copy = (
populate_sql(item, example, anonymize_values) and successful_copy
)
continue
if _is_wildcard(item):
_add_simple_step(item, example)
continue
if _is_select(item) or _is_from(item):
_add_simple_step(item, example)
continue
if _is_where(item):
successful_copy = (
_parse_where(item, example, anonymize_values) and successful_copy
)
continue
if _is_function(item):
successful_copy = (
_parse_function(item, example, anonymize_values) and successful_copy
)
continue
if _is_identifier(item):
successful_copy = (
_parse_identifier(item, example, anonymize_values) and successful_copy
)
continue
if _is_identifier_list(item):
successful_copy = (
_parse_identifier_list(item, example, anonymize_values)
and successful_copy
)
continue
if _is_keyword(item) and item.value.lower() in (
"group",
"order",
"by",
"having",
"order by",
"group by",
):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in (
"count",
"avg",
"min",
"max",
"sum",
):
_add_simple_step(item, example)
continue
if _is_operation(item):
successful_copy = (
_parse_operation(item, example, anonymize_values) and successful_copy
)
continue
if _is_keyword(item) and item.value.lower() in ("between", "and", "or"):
_add_simple_step(item, example)
continue
if _is_order(item):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("is", "not null", "in", "not"):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("distinct",):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("limit",):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("join", "on"):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("intersect", "union"):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("except",):
_add_simple_step(item, example)
continue
if _is_integer(item) and example.gold_sql_query.actions[
len(example.gold_sql_query.actions) - 1
].symbol in ("limit", "between", "and"):
prev_len = len(example.gold_sql_query.actions)
successful_copy = (
_add_simple_value(item, example, anonymize_values) and successful_copy
)
if len(example.gold_sql_query.actions) == prev_len:
raise ValueError(
"Gold query did not change length when adding simple value!"
)
continue
if _is_comparison(item):
successful_copy = (
_parse_comparison(item, example, anonymize_values) and successful_copy
)
continue
_debug_state(item, example)
raise ParseError("Incomplete _parse_sql")
return successful_copy | 54aaa9465eebdad55e04ec6c840ee760de4e79ee | 8,261 |
import sympy
from sympy import Float, Integer, Rational
def factorial(n):
"""Stop sympy blindly calculating factorials no matter how large.
If 'n' is a number of some description, ensure that it is smaller than
a cutoff, otherwise sympy will simply evaluate it, no matter how long that
may take to complete!
- 'n' should be a sympy object, that sympy.factorial(...) can use.
"""
if isinstance(n, (Integer, Float, Rational)) and n > 50:
raise ValueError("[Factorial]: Too large integer to compute factorial effectively!")
else:
return sympy.factorial(n) | 73dc223df2b23a93aafb0ec2b897f1668869e07a | 8,262 |
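# Usage sketch: small symbolic integers are expanded, large ones are rejected
# instead of being evaluated blindly.
print(factorial(sympy.Integer(5)))   # 120
try:
    factorial(sympy.Integer(100))
except ValueError as err:
    print(err)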
import re
def getSupplier(num):
"""" get the supplier for a card number
Attributes:
@num: card number
"""
supplier = str()
for key, value in suppliers.items():
if bool(re.match(value, num)):
supplier = key
break
if supplier == "":
supplier = "Ukwnow"
return supplier | 2572a0595d03cc3056b1155f8a3f0b007ec65b9e | 8,263 |
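# A minimal sketch of the module-level `suppliers` mapping that getSupplier
# expects; the real table is defined elsewhere, so the two patterns below are
# assumptions for illustration only.
suppliers = {
    "Visa": r"^4[0-9]{12}(?:[0-9]{3})?$",
    "Mastercard": r"^5[1-5][0-9]{14}$",
}
print(getSupplier("4111111111111111"))  # Visa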
from typing import Optional
import torch
def load_torch_hub_model(repo: str, model: str, *args, **kwargs):
"""Tries to load a torch hub model and handles different exceptions that could be raised.
Args:
repo: The GitHub repository containing the models.
model: The model name to download.
max_retries: The maximum number of tries to download the model.
Returns:
The downloaded torch model.
"""
error: Optional[Exception] = None
for _ in range(TORCH_HUB_DOWNLOAD_MAX_RETRIES + 1):
try:
try:
return torch.hub.load(
repo,
model,
*args,
**kwargs,
)
except RuntimeError:
return torch.hub.load(
repo,
model,
*args,
**kwargs,
force_reload=True,
)
except Exception as e:
error = e
assert error is not None
raise error | 3cc928f1026d276290ed97360a0cfebbcde82bb8 | 8,264 |
import numpy as np
from sklearn.metrics import pairwise_distances
def compute_medoid(data):
"""
Get medoid of data
Parameters
----------
data: ndarray
Data points
Returns
------
medoid: ndarray
Medoid
"""
dist_mat = pairwise_distances(data)
return data[np.argmin(dist_mat.sum(axis=0))] | 3fd071cd6c48566caa3a52ead26f06621682703d | 8,265 |
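# Usage sketch: the medoid is the actual data point with the smallest total
# distance to all other points.
pts = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 10.0]])
print(compute_medoid(pts))  # [1. 0.]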
import numpy as np
from numpy import pi
def int_sphere(fx, xgrid):
    r"""
Computes integrals over the sphere defined by the logarithmic
grid provided as input
Parameters
----------
fx : array_like
The function (array) to be integrated
xgrid : ndarray
The logarithmic radial grid
Returns
-------
I_sph : float
The value of the integrand
Notes
-----
The integral formula is given by
.. math:: I = 4 \pi \int \dd{x} e^{3x} f(x)
"""
func_int = 4.0 * pi * np.exp(3.0 * xgrid) * fx
I_sph = np.trapz(func_int, xgrid)
return I_sph | d92962cde5200f0c25d8bd0e1011c969e2287125 | 8,266 |
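# Usage sketch: with f(x) = 1 the integral reduces to the volume of the sphere
# of radius r_max = exp(xgrid[-1]) (minus a negligible inner hole).
xgrid = np.linspace(np.log(1e-4), np.log(10.0), 2000)
print(int_sphere(np.ones_like(xgrid), xgrid))   # ~ 4/3 * pi * 10**3 ~ 4189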
import os
def run(
func,
args=[],
kwargs={},
service="lambda",
capture_response=False,
remote_aws_lambda_function_name=None,
remote_aws_region=None,
**task_kwargs
):
"""
Instead of decorating a function with @task, you can just run it directly.
If you were going to do func(*args, **kwargs), then you will call this:
    import zappa.asynchronous
zappa.asynchronous.run(func, args, kwargs)
If you want to use SNS, then do:
zappa.asynchronous.run(func, args, kwargs, service='sns')
and other arguments are similar to @task
"""
lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
"AWS_LAMBDA_FUNCTION_NAME"
)
aws_region = remote_aws_region or os.environ.get("AWS_REGION")
task_path = get_func_task_path(func)
return ASYNC_CLASSES[service](
lambda_function_name=lambda_function_name,
aws_region=aws_region,
capture_response=capture_response,
**task_kwargs
).send(task_path, args, kwargs) | fdf1c4b8f654c27c2ac04a945b44e54b19e1b00f | 8,267 |
from flask import Flask, make_response, redirect, send_from_directory
def run_webserver(destination_root_dir):
    """ Run a local Flask webserver that serves the static files under destination_root_dir """
destination_root_dir = destination_root_dir
if destination_root_dir.startswith('/'):
destination_root_dir = destination_root_dir[1:]
if destination_root_dir.endswith('/'):
destination_root_dir = destination_root_dir[:-1]
app = Flask(__name__)
@app.route('/')
@app.route('/<path:filename>')
def serve_static_html(filename='index.html'):
""" Serve static HTML files
:type filename: str
:param filename: Path to the static HTML file
"""
if filename.startswith(destination_root_dir):
filename = filename.replace('{}/'.format(destination_root_dir), '')
return redirect('/{}'.format(filename))
response = make_response(
send_from_directory('/{}'.format(destination_root_dir), filename))
response.cache_control.no_cache = True
return response
app.run() | df5d26ae754009135061abb4a1a2d1cad0937e97 | 8,268 |
from typing import Union
from typing import List
import numpy as np
def sym_to_elm(symbols: Union[str, List, np.ndarray],
order: Union[np.ndarray, List[str]]):
"""Transform symbols to elements."""
if not isinstance(order, list):
order = order.tolist()
if not isinstance(symbols, (str, list)):
symbols = symbols.tolist()
if isinstance(symbols, str):
if symbols in order:
return order.index(symbols)
else:
return -1
else:
return np.array([sym_to_elm(s, order) for s in symbols]) | 16e2a88b353556068e8c1a3fa7c831264fd9f3c5 | 8,269 |
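# Usage sketch: map chemical symbols to their index in a fixed element ordering;
# unknown symbols map to -1.
_order = ['H', 'C', 'N', 'O']
print(sym_to_elm('O', _order))          # 3
print(sym_to_elm(['C', 'Xe'], _order))  # [ 1 -1]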
def set_custom_field(
custom_field_id: str = None,
person_id: str = None,
owner_id: str = None,
term_id: str = None,
value: str = None,
option_index: str = None):
"""
Sets a custom field value for a particular person, organization, or donation.
:param custom_field_id: The numeric ID of the custom field you're interested in.
:param person_id: The numeric ID of the person you're interested in.
:param owner_id: The numeric ID of object you're interested in, if they are not a person.
:param term_id: The numeric ID of the term you're interested in.
:param value: The value for this field.
:param option_index: For RADIOs and SELECTs, you can pass in the index of the selected option.
:returns: String containing xml or an lxml element.
"""
return get_anonymous(
'setCustomField',
custom_field_id=custom_field_id,
person_id=person_id,
owner_id=owner_id,
term_id=term_id,
value=value,
option_index=option_index) | 3d9471c62644e8e19f7b6faa03f3b503d5db7673 | 8,270 |
def ixn_is_increases_activity(ixn: ChemGeneIxn):
"""Checks if the interaction results in the decrease of the activity of the protein of the gene
:param pyctd.manager.models.ChemGeneIxn ixn: A chemical-gene interaction
:rtype: bool
"""
return _ixn_is_changes_protein(ixn, 'increases^activity') | 0b324a953ed2a90a9a357965ad4e5ef4a635c2df | 8,271 |
import pandas as pd
def load(csv, sep=';'):
    """
    Load data into a dataframe
    :param csv: path (or file-like object) of the CSV file to read
    :param sep: column separator used in the file
    :return: pandas.DataFrame with the loaded data
    """
data = pd.read_csv(csv, sep=sep)
return data | da988e31601b13a767178b4d6613d948100ddfc9 | 8,272 |
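# Usage sketch: pandas.read_csv accepts file-like objects as well as paths,
# so an in-memory buffer is enough to exercise the helper.
from io import StringIO
_df = load(StringIO("a;b\n1;2\n"), sep=';')
print(_df.shape)  # (1, 2)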
import os
import json
from django.http import HttpResponse
def get_docs(request):
"""
    return a list
[
{doc_id, doc_name},
{doc_id, doc_name},
]
"""
doc_set_id = request.GET["doc_set_id"]
docs = Document.objects.filter(doc_set_id=doc_set_id)
response_data = []
for doc in docs:
filename, file_extension = os.path.splitext(doc.get_path())
doc_name, _ = os.path.splitext(doc.doc_name)
out_name = f"{filename}.pdf"
response_data.append(
{
"doc_id": doc.id,
"doc_name": f"{doc_name}.pdf",
"path": out_name.split("/static")[1],
}
)
return HttpResponse(json.dumps(response_data), content_type="application/json") | 86d395b5a391a25b141d9d7f2139cf40ead94d41 | 8,273 |
import numpy as np
def count_lost_norm4(matrix):
    """Calculate the 4th penalty score: the proportion of dark modules in the
    entire symbol, rated as 50 + (5 + k) or 50 - (5 + k) percent; returns k * 10.
    Args:
        matrix (numpy.ndarray): binary module matrix (1 = dark, 0 = light)
    Returns:
        int: penalty score k * 10
    """
dark_sum = np.sum(matrix)
modules_num = matrix.size
dark_ratio = dark_sum / modules_num
k = abs((dark_ratio * 100 - 50)) / 5
return int(k) * 10 | ad05892952af5cfc5dbd8273bbc1357d31b1a295 | 8,274 |
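# Usage sketch: a 10x10 symbol with 65% dark modules deviates 15 percentage
# points from 50%, giving k = 3 and a penalty of 30.
_m = np.concatenate([np.ones(65, dtype=int), np.zeros(35, dtype=int)]).reshape(10, 10)
print(count_lost_norm4(_m))  # 30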
def sumaDigits(s):
"""assumes s is a string and returns the sum of the
decimal digits in s. For example if s is 'a2b3c' it returns 5"""
suma = 0
for c in s:
try:
suma+=int(c)
except ValueError:
continue
return suma | 47b09476925d45741d97eca5362e736f83a8185d | 8,275 |
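# Usage sketch: non-digit characters are skipped, so only '2' and '3' contribute.
print(sumaDigits('a2b3c'))   # 5
print(sumaDigits('abc'))     # 0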
def f5_list_policy_file_types_command(client: Client, policy_md5: str) -> CommandResults:
"""
Get a list of all policy file types.
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
"""
result = client.list_policy_file_types(policy_md5)
table_name = 'f5 data for listing policy file types:'
readable_output, printable_result = build_command_result(result, table_name)
command_results = CommandResults(
outputs_prefix='f5.FileType',
outputs_key_field='id',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results | b2b57d281b0cc3ea0ff7430d2033255071761a46 | 8,276 |
def recurrent_layer(input,
act=None,
bias_attr=None,
param_attr=None,
name=None,
reverse=False,
layer_attr=None):
"""
Simple recurrent unit layer. It is just a fully connect layer through both
time and neural network.
For each sequence [start, end] it performs the following computation\:
.. math::
out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = start \\\\
out_{i} = act(in_{i} + out_{i-1} * W) \\ \\ \\text{for} \\ start < i <= end
If reversed is true, the order is reversed\:
.. math::
out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = end \\\\
out_{i} = act(in_{i} + out_{i+1} * W) \\ \\ \\text{for} \\ start <= i < end
:param input: Input Layer
:type input: LayerOutput
:param act: activation.
:type act: BaseActivation
:param bias_attr: bias attribute.
:type bias_attr: ParameterAttribute
:param param_attr: parameter attribute.
:type param_attr: ParameterAttribute
:param name: name of the layer
:type name: basestring
:param layer_attr: Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
Layer(
name=name,
type=LayerType.RECURRENT_LAYER,
inputs=Input(input.name, **param_attr.attr),
active_type=act.name,
bias=ParamAttr.to_bias(bias_attr),
reversed=reverse,
**ExtraAttr.to_kwargs(layer_attr))
return LayerOutput(
name=name,
layer_type=LayerType.RECURRENT_LAYER,
parents=[input],
size=input.size,
activation=act,
reverse=reverse) | b616d372a9324c11aa1bb524d96844ce1e8c47e5 | 8,277 |
from numpy import average, shape, zeros
def mean_center(X):
"""
@param X: 2-dimensional matrix of number data
@type X: numpy array
@return: Mean centered X (always has same dimensions as X)
"""
(rows, cols) = shape(X)
new_X = zeros((rows, cols), float)
_averages = average(X, 0)
for row in range(rows):
new_X[row, 0:cols] = X[row, 0:cols] - _averages[0:cols]
return new_X | 54885596c95856b0ce0f7fe68d2922641e7a830a | 8,278 |
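# Usage sketch: each column is shifted so that its mean becomes zero.
from numpy import array
print(mean_center(array([[1.0, 10.0], [3.0, 30.0]])))  # rows become [-1., -10.] and [1., 10.]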
import collections
import csv
def csv_to_json(order_sentence_file: str, order_comment_file: str, os_filter_file: str=None) -> dict:
"""
Conversion of CSV to dictionary/JSON for sequenced PowerPlans and clinical
category
:param order_sentence_file:
:param order_comment_file:
:return:
"""
output_dict = collections.defaultdict()
details_dict = create_os_details_dict(os_file=order_sentence_file,
comp_file=order_comment_file,
os_filter_file=os_filter_file)
with open(order_comment_file, "r") as f:
reader = csv.DictReader(f)
row = next(reader)
field_names = list(row.keys())
with open(order_comment_file, 'r') as f:
reader = csv.DictReader(f, fieldnames=field_names)
next(reader)
for row in reader:
powerplan = row['POWERPLAN_DESCRIPTION']
if not powerplan:
continue
phase = row['PHASE']
powerplan_display_method = row['PLAN_DISPLAY_METHOD']
phase_display_method = row['PHASE_DISPLAY_METHOD']
dcp_clin_cat = row['DCP_CLIN_CAT']
dcp_clin_sub_cat = row['DCP_CLIN_SUB_CAT']
sequence = int(row['SEQUENCE'].strip())
bgcolor_red = row['BGCOLOR_RED']
bgcolor_green = row['BGCOLOR_GREEN']
bgcolor_blue = row['BGCOLOR_BLUE']
synonym = row['COMPONENT']
iv_synonym = row.get("IV_COMPONENT")
orderable_type_flag = int(row.get("ORDERABLE_TYPE_FLAG"))
target_duration = row['TARGET_DURATION']
start_offset = row['START_OFFSET']
link_duration_to_phase = row['LINK_DURATION_TO_PHASE']
required_ind = row['REQUIRED_IND']
include_ind = row['INCLUDE_IND']
chemo_ind = row['CHEMO_IND']
chemo_related_ind = row['CHEMO_RELATED_IND']
persistent_ind = row['PERSISTENT_IND']
linking_rule = row.get("LINKING_RULE")
linking_rule_quantity = row.get("LINKING_RULE_QUANTITY")
linking_rule_flag = row.get("LINKING_RULE_FLAG")
linking_override_reason = row.get("LINKING_OVERRIDE_REASON")
assigned_dots = row.get("ASSIGNED_DOTS")
if row['ORDER_SENTENCE_ID'] is not None:
order_sentence_id = int(float(row['ORDER_SENTENCE_ID']))
else:
order_sentence_id = 0
if row['ORDER_SENTENCE_SEQ'] is not None and row['ORDER_SENTENCE_SEQ']:
sent_seq = int(row['ORDER_SENTENCE_SEQ'].strip())
else:
sent_seq = 0
if powerplan not in output_dict:
output_dict[powerplan] = {
'display_method': powerplan_display_method,
'phases': {}
}
phase_dict = output_dict.get(powerplan).get('phases')
if not phase:
phase = powerplan
phase_display_method = powerplan_display_method
if phase not in phase_dict:
phase_dict[phase] = {
'phase_display_method': phase_display_method,
'components': []
}
comp_dict = phase_dict.get(phase).get('components')
component_idx = find_key_val_idx_in_list(
lst=comp_dict, key='sequence', value=sequence
)
if component_idx is None:
comp_dict.append({
'synonym': synonym,
'orderable_type_flag': orderable_type_flag,
'dcp_clin_cat': dcp_clin_cat,
'dcp_clin_sub_cat': dcp_clin_sub_cat,
'sequence': sequence,
'target_duration': target_duration,
'start_offset': start_offset,
'link_duration_to_phase': link_duration_to_phase,
'required_ind': required_ind,
'include_ind': include_ind,
'chemo_ind': chemo_ind,
'chemo_related_ind': chemo_related_ind,
'persistent_ind': persistent_ind,
'linking_rule': linking_rule,
'linking_rule_quantity': linking_rule_quantity,
'linking_rule_flag': linking_rule_flag,
'linking_override_reason': linking_override_reason,
'assigned_dots': assigned_dots,
'bgcolor_red': bgcolor_red,
'bgcolor_green': bgcolor_green,
'bgcolor_blue': bgcolor_blue,
'order_sentences': []
})
component_idx = -1
sent_list = comp_dict[component_idx].get('order_sentences')
# sentence_idx = find_key_val_idx_in_list(
# lst=sent_list, key='sequence', value=sent_seq
# )
order_sentence_details = details_dict.get(order_sentence_id)
if order_sentence_id > 0:
sent_list.append({
'sequence': sent_seq,
'order_sentence_id': order_sentence_id,
'order_sentence_details': order_sentence_details,
'iv_synonym': iv_synonym
})
sentence_idx = -1
# TODO: Refactor this to have a domain key and a powerplans key that
# will hold the powerplans dictionary
    if 'b0783' in order_comment_file.lower():
        domain = 'b0783'
    elif 'p0783' in order_comment_file.lower():
        domain = 'p0783'
    else:
        # guard against an unbound `domain` when neither marker is in the file name
        domain = None
output = dict()
output['domain'] = domain
output['powerplans'] = output_dict
return output | 9a4021168624773233dbea5eb6a5e6f0b7eacee5 | 8,279 |
import csv
from io import BytesIO, StringIO
def process_xlsform(xls, default_name):
"""
Process XLSForm file and return the survey dictionary for the XLSForm.
"""
# FLOW Results package is a JSON file.
file_object = None
if xls.name.endswith('csv'):
# a csv file gets closed in pyxform, make a copy
xls.seek(0)
file_object = BytesIO()
file_object.write(xls.read())
file_object.seek(0)
xls.seek(0)
try:
return parse_file_to_json(xls.name, file_object=file_object or xls)
except csv.Error as e:
if is_newline_error(e):
xls.seek(0)
file_object = StringIO(
u'\n'.join(xls.read().splitlines()))
return parse_file_to_json(
xls.name, default_name=default_name, file_object=file_object)
raise e | af695bef6f063b2bfa7862e856e16ab42be2db96 | 8,280 |
import numpy as np
def unflatten(X: np.ndarray, Y: np.ndarray, shape: tuple):
""" Unflattens images with shape defined by list of tuples s
X is an array (1D), unflattened to 2D
Y is an array (1D) of flattened mask (flattened 2D label) array
Not that X and Y are not compatible dimensions
s denotes dimensions of the *INPUT* image
len(s) == 3 : reshape to 2D label image
len(s) == 2 : input is flattened image, ignore.
"""
# This need to be tested.
Yout = Y.copy()
Yout[Y!=LABEL_IGNORE] = X
Yout = np.reshape(Yout,(shape[0], shape[1]))
return Yout | 7a1a79b165d44efd55c2e66936e072298cd5d648 | 8,281 |
from typing import Dict
from typing import Any
from typing import List
import logging
from pathlib import Path
def collate_features(model_config: Dict[str, Any], dummy_features: List[str]) -> List[str]:
"""Saves and returns final list of simple and dummy features."""
simple_features = list(model_config.get("simple_features", {}).keys())
features = simple_features + dummy_features
logging.info(
f"Model uses {len(simple_features)} simple features and"
+ f"{len(dummy_features)} dummy features"
+ f"for {len(features)} features total"
)
output_path = Path(utils.get_model_path(model_config), "features.txt")
logging.info(f"Saving list of features to {output_path}")
with open(output_path, "w") as f:
for feature in features:
f.write(feature)
f.write("\n")
return features | e66e3aceb0b5bf093a4fc3165a30694875903b73 | 8,282 |
from typing import Type
def new_dga(*, key_mo=None, pred=None, deg_diff=None) -> Type[DgaGb]:
"""Return a dynamically created subclass of GbDga.
When key_mo=None, use revlex ordering by default."""
class_name = f"GbDga_{DgaGb._index_subclass}"
DgaGb._index_subclass += 1
if deg_diff is not None:
deg_diff = Vector(deg_diff)
else:
raise BA.MyDegreeError("degree of differential not supplied")
dct = {
"gens": {},
"rels": {},
"_rels_buffer": {},
"key_mo": key_mo,
"pred": pred or pred_always_true,
"dim_grading": None,
"deg_diff": deg_diff,
}
return type(class_name, (DgaGb,), dct) | 32cfa58eec7512dd7b39b2298608df538a232ef9 | 8,283 |
import xarray as xr
def is_xarray(array):
"""Return True if array is a xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,xr.DataArray) | edf14a0c87e6590e6a583425ec830e51defe5fa1 | 8,284 |
from typing import Counter
def _check_duplicate_gnames(block_id, block_dict, extra_args):
"""
Return False if any duplicate group names exist in /etc/group file, else return True
"""
gnames = _execute_shell_command("cat /etc/group | cut -f1 -d\":\"", python_shell=True).strip()
gnames = gnames.split('\n') if gnames != "" else []
duplicate_gnames = [k for k, v in Counter(gnames).items() if v > 1]
if duplicate_gnames is None or duplicate_gnames == []:
return True
return str(duplicate_gnames) | 2a181ca67f87f0f90eb97c1df0e7ae8db6ee2206 | 8,285 |
def join_nonempty(l):
"""
Join all of the nonempty string with a plus sign.
>>> join_nonempty(('x1 + x2 + x1:x2', 'x3 + x4'))
'x1 + x2 + x1:x2 + x3 + x4'
>>> join_nonempty(('abc', '', '123', ''))
'abc + 123'
"""
return ' + '.join(s for s in l if s != '') | 041948f95caaef14cb96e761f08b4a84fba37d6e | 8,286 |
import numpy as np
import torch
def correct_msa_restypes(protein):
"""Correct MSA restype to have the same order as rc."""
new_order_list = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
new_order = torch.tensor(
[new_order_list] * protein["msa"].shape[1],
device=protein["msa"].device,
).transpose(0, 1)
protein["msa"] = torch.gather(new_order, 0, protein["msa"])
perm_matrix = np.zeros((22, 22), dtype=np.float32)
perm_matrix[range(len(new_order_list)), new_order_list] = 1.0
for k in protein:
if "profile" in k:
num_dim = protein[k].shape.as_list()[-1]
assert num_dim in [
20,
21,
22,
], "num_dim for %s out of expected range: %s" % (k, num_dim)
protein[k] = torch.dot(protein[k], perm_matrix[:num_dim, :num_dim])
return protein | 881736333e3153c9c7713f7a54252eba705b7bb8 | 8,287 |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import gridspec
from matplotlib.lines import Line2D
def plot_bootstrap_lr_grp(dfboot, df, grp='grp', prm='premium', clm='claim',
title_add='', force_xlim=None):
""" Plot bootstrapped loss ratio, grouped by grp """
count_txt_h_kws, mean_txt_kws, pest_mean_point_kws, mean_point_kws = _get_kws_styling()
if dfboot[grp].dtypes != 'object':
dfboot = dfboot.copy()
dfboot[grp] = dfboot[grp].map(lambda x: f's{x}')
mn = dfboot.groupby(grp)['lr'].mean().tolist()
pest_mn = df.groupby(grp).apply(lambda g: np.nan_to_num(g[clm], 0).sum() / g[prm].sum()).values
f = plt.figure(figsize=(14, 2+(len(mn)*.25))) #, constrained_layout=True)
gs = gridspec.GridSpec(1, 2, width_ratios=[11, 1], figure=f)
ax0 = f.add_subplot(gs[0])
ax1 = f.add_subplot(gs[1], sharey=ax0)
_ = sns.violinplot(x='lr', y=grp, data=dfboot, kind='violin', cut=0,
scale='count', width=0.6, palette='cubehelix_r', ax=ax0)
_ = [ax0.plot(v, i%len(mn), **mean_point_kws) for i, v in enumerate(mn)]
_ = [ax0.annotate(f'{v:.1%}', xy=(v, i%len(mn)), **mean_txt_kws) for i, v in enumerate(mn)]
_ = [ax0.plot(v, i%len(pest_mn), **pest_mean_point_kws) for i, v in enumerate(pest_mn)]
elems = [Line2D([0],[0], label='population (bootstrap)', **mean_point_kws),
Line2D([0],[0], label='sample', **pest_mean_point_kws)]
_ = ax0.legend(handles=elems, title='Mean LRs') #loc='upper right',
if force_xlim is not None:
_ = ax0.set(xlim=force_xlim)
_ = sns.countplot(y=grp, data=df, ax=ax1, palette='cubehelix_r')
ct = df.groupby(grp).size().tolist()
_ = [ax1.annotate(f'{v}', xy=(v, i%len(ct)), **count_txt_h_kws) for i, v in enumerate(ct)]
ypos = 1.01
if title_add != '':
ypos = 1.03
title_add = f'\n{title_add}'
title = (f'Grouped Loss Ratios (Population Estimates via Bootstrapping)' +
f' - grouped by {grp}')
_ = f.suptitle(f'{title}{title_add}', y=ypos)
plt.tight_layout()
return gs | 11a0276ab1eac233db537943b5af67f0452f89db | 8,288 |
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
def ajax_user_search(request):
"""
returns the user search result. currently this is not used since search user feature changed to form post.
"""
if request.method=='POST':
username=request.POST.get('username','')
users=User.objects.filter(username__contains=username)
try:
brand=int(request.POST['company'])
users=users.filter(userprofile__work_for=brand)
except:
pass
return render_to_response('ajax/user_search.html', {'users':users,}, mimetype='text/html') | 8318b881280e47ff28ea8db259df607b1e5bf7fb | 8,289 |
def shortest_path(start, end):
"""
Using 2-way BFS, finds the shortest path from start_position to
end_position. Returns a list of moves.
You can use the rubik.quarter_twists move set.
Each move can be applied using rubik.perm_apply
"""
if start == (7, 8, 6, 20, 18, 19, 3, 4, 5, 16, 17, 15, 0, 1, 2, 14, 12, 13, 10, 11, 9, 21, 22, 23):
return None
return bfs((start, None), end) | c75b54b434c09f6d570f79c40453bd465a2a439b | 8,290 |
import torch
from torch._utils import _flatten_dense_tensors
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors(
[param.data for param in model_params]
).float()
except BaseException:
print(
"Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer."
)
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params | 36ec1b264af2b05c4fc13a295d007e0ac830b821 | 8,291 |
def to_matrix_vector(transform):
"""
Code from nilearn module, available at: https://github.com/nilearn/nilearn/blob/master/nilearn/image/resampling.py
Split an homogeneous transform into its matrix and vector components.
The transformation must be represented in homogeneous coordinates.
It is split into its linear transformation matrix and translation vector
components.
This function does not normalize the matrix. This means that for it to be
the inverse of from_matrix_vector, transform[-1, -1] must equal 1, and
transform[-1, :-1] must equal 0.
Parameters
----------
transform: numpy.ndarray
Homogeneous transform matrix. Example: a (4, 4) transform representing
linear transformation and translation in 3 dimensions.
Returns
-------
matrix, vector: numpy.ndarray
The matrix and vector components of the transform matrix. For
an (N, N) transform, matrix will be (N-1, N-1) and vector will be
a 1D array of shape (N-1,).
See Also
--------
from_matrix_vector
"""
ndimin = transform.shape[0] - 1
ndimout = transform.shape[1] - 1
matrix = transform[0:ndimin, 0:ndimout]
vector = transform[0:ndimin, ndimout]
return matrix, vector | b971f3b53199a16bbf2343ed544389cbc21f1644 | 8,292 |
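# Usage sketch: split a 4x4 homogeneous affine into its 3x3 linear part and
# its translation vector.
import numpy as np
_affine = np.eye(4)
_affine[:3, 3] = [10.0, -5.0, 2.5]
_matrix, _vector = to_matrix_vector(_affine)
print(_matrix.shape, list(_vector))  # (3, 3) [10.0, -5.0, 2.5]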
def sieveEr(N):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N.
This function implements the algorithm called
    sieve of Eratosthenes.
"""
# precondition
assert isinstance(N,int) and (N > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
beginList = [x for x in range(2,N+1)]
    ans = [] # this list will be returned.
# actual sieve of erathostenes
for i in range(len(beginList)):
for j in range(i+1,len(beginList)):
if (beginList[i] != 0) and \
(beginList[j] % beginList[i] == 0):
beginList[j] = 0
# filters actual prime numbers.
ans = [x for x in beginList if x != 0]
# precondition
assert isinstance(ans,list), "'ans' must been from type list"
return ans | 8d48d2a491341d5302307597ad64ac4a37b1abb8 | 8,293 |
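# Usage sketch:
print(sieveEr(20))  # [2, 3, 5, 7, 11, 13, 17, 19]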
def validate_fields(item, fields=None):
"""
Check that all requested fields were returned
:param item: comment or submission
:param fields: list[str]
:return: list[str]
"""
actual_fields = item.d_.keys()
if fields is None:
requested_fields = actual_fields
else:
requested_fields = fields
missing_fields = set(requested_fields).difference(actual_fields)
# drop extra fields returned from api
final_fields = set(requested_fields).intersection(actual_fields)
return final_fields, missing_fields | 88bd6d20ba1cc04f8478128f7f32192ef680762b | 8,294 |
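# Usage sketch with a stand-in object exposing the `d_` dict that the
# comment/submission objects are expected to carry (an assumption for illustration).
from types import SimpleNamespace
_item = SimpleNamespace(d_={"id": "abc", "author": "someone"})
print(validate_fields(_item, fields=["id", "score"]))  # ({'id'}, {'score'})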
import warnings
import numpy as np
from sklearn.utils.class_weight import compute_sample_weight
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None, target_imbalance_ratio=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, y,
target_imbalance_ratio, verbose)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree | a5c027d96bd96522544e56bd427ae39a5075e6b8 | 8,295 |
from typing import Iterable
def remove_nones(sequence: Iterable) -> list:
"""Removes elements where bool(x) evaluates to False.
Examples
--------
Normal usage::
remove_nones(['m', '', 'l', 0, 42, False, True])
# ['m', 'l', 42, True]
"""
# Note this is redundant with it.chain
return [x for x in sequence if x] | 975c0104b3cc05bb82fa211c1b85b49c7d3cb174 | 8,296 |
import glob
import os
def writeSConscript(dirpath, profile, pkeys):
""" Create a SConscript file in dirpath.
"""
# Activate modules
mods, defines = collectModules(dirpath, pkeys)
if validKey('CONFIG', pkeys) and isComplicated(pkeys['CONFIG'][0]):
return False
qrcname = ""
if not validKey('SOURCES', pkeys):
# No SOURCES specified, try to find CPP files
slist = glob.glob(os.path.join(dirpath,'*.cpp'))
if len(slist) == 0:
# Nothing to build here
return False
else:
# Scan for Q_INIT_RESOURCE
for s in slist:
qrcname = findQResourceName(s)
if qrcname:
break
allmods = True
for m in mods:
if m not in pkeys['qtmodules']:
print(" no module %s" % m)
allmods = False
if not allmods:
return False
sc = open(os.path.join(dirpath,'SConscript'),'w')
sc.write("""Import('qtEnv')
env = qtEnv.Clone()
""")
if len(mods):
sc.write('env.EnableQt5Modules([\n')
for m in mods[:-1]:
sc.write("'%s',\n" % m)
sc.write("'%s'\n" % mods[-1])
sc.write('])\n\n')
# Add CPPDEFINEs
if len(defines):
sc.write('env.AppendUnique(CPPDEFINES=[\n')
for d in defines[:-1]:
sc.write("'%s',\n" % d)
sc.write("'%s'\n" % defines[-1])
sc.write('])\n\n')
# Add LIBS
if validKey('LIBS', pkeys):
sc.write('env.AppendUnique(LIBS=[\n')
for d in pkeys['LIBS'][:-1]:
sc.write("'%s',\n" % d)
sc.write("'%s'\n" % pkeys['LIBS'][-1])
sc.write('])\n\n')
# Collect INCLUDEPATHs
incpaths = []
if validKey('INCLUDEPATH', pkeys):
incpaths = pkeys['INCLUDEPATH']
if validKey('FORMS', pkeys):
for s in pkeys['FORMS']:
head, tail = os.path.split(s)
if head and head not in incpaths:
incpaths.append(head)
if incpaths:
sc.write('env.Append(CPPPATH=[\n')
for d in incpaths[:-1]:
sc.write("'%s',\n" % relOrAbsPath(dirpath, d))
sc.write("'%s'\n" % relOrAbsPath(dirpath, incpaths[-1]))
sc.write('])\n\n')
# Add special environment flags
if len(qtenv_flags):
for key, value in list(qtenv_flags.items()):
sc.write("env['%s']=%s\n" % (key, value))
# Write source files
if validKey('SOURCES', pkeys):
sc.write('source_files = [\n')
for s in pkeys['SOURCES'][:-1]:
sc.write("'%s',\n" % relOrAbsPath(dirpath, s))
if not qrcname:
qrcname = findQResourceName(os.path.join(dirpath,s))
sc.write("'%s'\n" % relOrAbsPath(dirpath, pkeys['SOURCES'][-1]))
if not qrcname:
qrcname = findQResourceName(os.path.join(dirpath,pkeys['SOURCES'][-1]))
sc.write(']\n\n')
# Write .ui files
if validKey('FORMS', pkeys):
sc.write('ui_files = [\n')
for s in pkeys['FORMS'][:-1]:
sc.write("'%s',\n" % relOrAbsPath(dirpath, s))
sc.write("'%s'\n" % relOrAbsPath(dirpath, pkeys['FORMS'][-1]))
sc.write(']\n')
sc.write('env.Uic5(ui_files)\n\n')
# Write .qrc files
if validKey('RESOURCES', pkeys):
qrc_name = pkeys['RESOURCES'][0]
if qrcname:
if qrc_name.endswith('.qrc'):
qrc_name = qrc_name[:-4]
sc.write("qrc_out = env.Qrc5('%s')\nsource_files.append(qrc_out)\nenv['QT5_QRCFLAGS'] = ['-name', '%s']\n" % (qrc_name, qrcname))
else:
if not qrc_name.endswith('.qrc'):
qrc_name += '.qrc'
sc.write("source_files.append('%s')\n" % qrc_name)
# Select module
type = 'Program'
if validKey('TEMPLATE', pkeys):
if pkeys['TEMPLATE'][0] == 'lib':
type = 'StaticLibrary'
if pkeys['TEMPLATE'][0] == 'dll':
type = 'SharedLibrary'
# TARGET may be wrapped by qtLibraryTarget function...
target = profile
if validKey('TARGET', pkeys):
t = pkeys['TARGET'][0]
m = qtlib_re.search(t)
if m:
t = "Qt" + m.group(1)
target = t.replace("$$TARGET", profile)
# Create program/lib/dll
else:
if validKey('SOURCES', pkeys):
sc.write("env.%s('%s', source_files)\n\n" % (type, target))
else:
sc.write("env.%s('%s', Glob('*.cpp'))\n\n" % (type, target))
sc.close()
return True | 36766bcb0f1e7d88b4df6500cb758b70c7c63600 | 8,297 |
from typing import List
import pathlib
def retrieve(passed: List[str]) -> List[str]:
"""
Retrieves all items that are able to be
converted, recursively, from the passed list.
Parameters
----------
passed: List[str]
The items to search.
Returns
-------
List[str]:
All found items.
"""
ret = []
for item in passed:
try:
path = pathlib.Path(item)
if path.is_file() and path.suffix == ".txt":
ret += retrieve(path.read_text().split("\n"))
elif path.is_file():
ret.append(str(path))
elif path.is_dir():
ret += retrieve([str(p) for p in path.iterdir()])
else:
ret.append(item)
except OSError:
ret.append(item)
return ret | 6789255e302caf9dc6e481df532acec20dfc6b3c | 8,298 |
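# Usage sketch: plain strings that are neither existing files nor directories
# are passed through unchanged (assuming these example paths do not exist).
print(retrieve(["definitely_missing_clip.mp4", "https://example.com/item"]))
# ['definitely_missing_clip.mp4', 'https://example.com/item']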
from typing import List
from typing import Optional
from typing import Dict
from typing import Tuple
from typing import Any
def get_out_of_sample_best_point_acqf(
model: Model,
Xs: List[Tensor],
X_observed: Tensor,
objective_weights: Tensor,
mc_samples: int = 512,
fixed_features: Optional[Dict[int, float]] = None,
fidelity_features: Optional[List[int]] = None,
target_fidelities: Optional[Dict[int, float]] = None,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
seed_inner: Optional[int] = None,
qmc: bool = True,
**kwargs: Any,
) -> Tuple[AcquisitionFunction, Optional[List[int]]]:
"""Picks an appropriate acquisition function to find the best
out-of-sample (predicted by the given surrogate model) point
and instantiates it.
NOTE: Typically the appropriate function is the posterior mean,
but can differ to account for fidelities etc.
"""
model = model
# subset model only to the outcomes we need for the optimization
if kwargs.get(Keys.SUBSET_MODEL, True):
subset_model_results = subset_model(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
)
model = subset_model_results.model
objective_weights = subset_model_results.objective_weights
outcome_constraints = subset_model_results.outcome_constraints
fixed_features = fixed_features or {}
target_fidelities = target_fidelities or {}
if fidelity_features:
# we need to optimize at the target fidelities
if any(f in fidelity_features for f in fixed_features):
raise RuntimeError("Fixed features cannot also be fidelity features.")
elif set(fidelity_features) != set(target_fidelities):
raise RuntimeError(
"Must provide a target fidelity for every fidelity feature."
)
# make sure to not modify fixed_features in-place
fixed_features = {**fixed_features, **target_fidelities}
elif target_fidelities:
raise RuntimeError(
"Must specify fidelity_features in fit() when using target fidelities."
)
acqf_class, acqf_options = pick_best_out_of_sample_point_acqf_class(
outcome_constraints=outcome_constraints,
mc_samples=mc_samples,
qmc=qmc,
seed_inner=seed_inner,
)
objective, posterior_transform = get_botorch_objective_and_transform(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
)
if objective is not None:
if not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
f"Unknown objective type: {objective.__class__}" # pragma: nocover
)
acqf_options = {"objective": objective, **acqf_options}
if posterior_transform is not None:
acqf_options = {"posterior_transform": posterior_transform, **acqf_options}
acqf = acqf_class(model=model, **acqf_options) # pyre-ignore [45]
if fixed_features:
acqf = FixedFeatureAcquisitionFunction(
acq_function=acqf,
d=X_observed.size(-1),
columns=list(fixed_features.keys()),
values=list(fixed_features.values()),
)
non_fixed_idcs = [i for i in range(Xs[0].size(-1)) if i not in fixed_features]
else:
non_fixed_idcs = None
return acqf, non_fixed_idcs | a6331759833b4715275fdb3ca7d19c237c2c7e55 | 8,299 |