content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---|
def pattern():
"""Start a pattern
Expected arguments are: name, delay, pause
"""
if request.args.get('name') is None:
return ''
pattern = request.args.get('name')
delay = float(request.args.get('delay', 0.1))
pause = float(request.args.get('pause', 0.5))
LightsController.start_pattern(pattern=pattern, delay=delay, pause=pause)
return ''
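# Usage sketch (hedged): this handler presumably sits behind a Flask route; the
# route path and app object below are assumptions, not part of the original code.
#   app.add_url_rule('/pattern', 'pattern', pattern)
#   # then e.g. GET /pattern?name=rainbow&delay=0.2&pause=1.0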
|
13d1ff59dbd4521b157ab28bae75fed30378f8c5
| 30,881 |
def smow(t):
"""
Density of Standard Mean Ocean Water (Pure Water) using EOS 1980.
Parameters
----------
t : array_like
temperature [℃ (ITS-90)]
Returns
-------
dens(t) : array_like
density [kg m :sup:`-3`]
Examples
--------
>>> # Data from UNESCO Tech. Paper in Marine Sci. No. 44, p22.
>>> import seawater as sw
>>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
>>> sw.smow(t)
array([ 999.842594 , 999.842594 , 995.65113374, 995.65113374,
999.842594 , 999.842594 , 995.65113374, 995.65113374])
References
----------
.. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
computation of fundamental properties of seawater. UNESCO Tech. Pap. in
Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
.. [2] Millero, F.J. and Poisson, A. International one-atmosphere equation
of state of seawater. Deep-Sea Res. 1981. Vol28A(6) pp625-629.
doi:10.1016/0198-0149(81)90122-9
"""
t = np.asanyarray(t)
a = (999.842594, 6.793952e-2, -9.095290e-3, 1.001685e-4, -1.120083e-6,
6.536332e-9)
T68 = T68conv(t)
return (a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * T68) * T68) * T68) *
T68) * T68)
|
1f7ae913a1f4c71493d7d94d04bf543e6ffff72b
| 30,882 |
from typing import Optional
from typing import Tuple
import numpy as np
import torch
from torch import nn
def generate_change_image_given_dlatent(
dlatent: np.ndarray,
generator: networks.Generator,
classifier: Optional[MobileNetV1],
class_index: int,
sindex: int,
s_style_min: float,
s_style_max: float,
style_direction_index: int,
shift_size: float,
label_size: int = 2,
num_layers: int = 14
) -> Tuple[np.ndarray, float]:
"""Modifies an image given the dlatent on a specific S-index.
Args:
dlatent: The image dlatent, with shape [dlatent_size].
generator: The generator model. Either StyleGAN or GLO.
classifier: The classifier to visualize.
class_index: The index of the class to visualize.
sindex: The specific style index to visualize.
s_style_min: The minimal value of the style index.
s_style_max: The maximal value of the style index.
style_direction_index: If 0, move s to its min value; otherwise move it to its
max value.
shift_size: Factor of the shift of the style vector.
label_size: The size of the label.
Returns:
The image after the style index modification, and the output of
the classifier on this image.
"""
expanded_dlatent_tmp = torch.tile(dlatent.unsqueeze(1),[1, num_layers, 1])
network_inputs = generator.synthesis.style_vector_calculator(expanded_dlatent_tmp)
style_vector = torch.cat(generator.synthesis.style_vector_calculator(expanded_dlatent_tmp)[1], dim=1).numpy()
orig_value = style_vector[0, sindex]
target_value = (s_style_min if style_direction_index == 0 else s_style_max)
if target_value == orig_value:
weight_shift = shift_size
else:
weight_shift = shift_size * (target_value - orig_value)
layer_idx, in_idx = sindex_to_layer_idx_and_index(network_inputs[1], sindex)
layer_one_hot = torch.nn.functional.one_hot(torch.Tensor([in_idx]).to(int), network_inputs[1][layer_idx].shape[1])
network_inputs[1][layer_idx] += (weight_shift * layer_one_hot)
svbg_new = group_new_style_vec_block(network_inputs[1])
images_out = generator.synthesis.image_given_dlatent(expanded_dlatent_tmp, svbg_new)
images_out = torch.maximum(torch.minimum(images_out, torch.Tensor([1])), torch.Tensor([-1]))
change_image = torch.tensor(images_out.numpy())
result = classifier(change_image)
change_prob = nn.Softmax(dim=1)(result).detach().numpy()[0, class_index]
change_image = change_image.permute(0, 2, 3, 1)
return change_image, change_prob
|
89ad94dd6f74c175ede27712046d3c46ba43143c
| 30,884 |
def _build_square(A, B, C, D):
"""Build a matrix from submatrices
A B
C D
"""
return np.vstack((
np.hstack((A, B)),
np.hstack((C, D))
))
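# Minimal sketch: for 2-D inputs this is equivalent to np.block([[A, B], [C, D]]).
# For example, _build_square(np.eye(2), np.zeros((2, 2)), np.zeros((2, 2)), np.eye(2))
# returns the 4x4 identity matrix.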
|
510b39f433023339f977a665c055f60abe46a160
| 30,886 |
def DataFrame_to_AsciiDataTable(pandas_data_frame,**options):
"""Converts a pandas.DataFrame to an AsciiDataTable"""
# Set up defaults and pass options
defaults={}
conversion_options={}
for key,value in defaults.items():
conversion_options[key]=value
for key,value in options.items():
conversion_options[key]=value
conversion_options["column_names"]=pandas_data_frame.columns.tolist()[:]
conversion_options["data"]=pandas_data_frame.values.tolist()[:]
conversion_options["column_types"]=[str(x) for x in pandas_data_frame.dtypes.tolist()[:]]
new_table=AsciiDataTable(None,**conversion_options)
return new_table
|
2864440528324e00e5d7389b5cc2b04aecbb833b
| 30,888 |
def generate_fps_from_reaction_products(reaction_smiles, fp_data_configs):
""" Generates specified fingerprints for the both reactive and non-reactive substructures of the reactant and
product molecules that are the participating in the chemical reaction. """
# Generate the RDKit Mol representations of the product molecules and generate the reaction cores.
reactants, _, products = parse_reaction_roles(reaction_smiles, as_what="mol_no_maps")
reaction_cores = get_reaction_core_atoms(reaction_smiles)
# Separate the reaction cores if they consist out of multiple non-neighbouring parts.
separated_cores = get_separated_cores(reaction_smiles, reaction_cores)
# Define variables which will be used for storing the results.
total_reactive_fps, total_non_reactive_fps = [], []
# Iterate through the product molecules and generate fingerprints for all reactive and non-reactive substructures.
for p_ind, product in enumerate(products):
# Iterate through all of the dataset configurations.
for fp_config in fp_data_configs:
reactive_fps, non_reactive_fps = [], []
# Generate fingerprints from the reactive substructures i.e. the reaction core(s).
for core in separated_cores[1][p_ind]:
# Generate reactive EC fingerprints and add them to the list.
if fp_config["type"] == "ecfp":
reactive_fps.append(construct_ecfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=core, output_type="np_array", as_type="np_float"))
# Generate reactive HS fingerprints and add them to the list.
else:
reactive_fps.append(construct_hsfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=core, neighbourhood_ext=fp_config["ext"]))
# Generate the extended environment of the reaction core.
extended_core_env = get_atom_environment(reaction_cores[1][p_ind], product, degree=1)
# Generate fingerprints from the non-reactive substructures i.e. non-reaction core substructures.
for bond in product.GetBonds():
# Generate the extended environment of the focus bond.
extended_bond_env = get_bond_environment(bond, product, degree=1)
# If the extended environment of the non-reactive substructure does not overlap with the extended
# reaction core, generate a non-reactive fingerprint representation.
if not extended_bond_env.intersection(extended_core_env):
# Generate non-reactive EC fingerprints and add them to the list.
if fp_config["type"] == "ecfp":
non_reactive_fps.append(
construct_ecfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()],
output_type="np_array", as_type="np_float"))
# Generate non-reactive HS fingerprints and add them to the list.
else:
non_reactive_fps.append(
construct_hsfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()],
neighbourhood_ext=fp_config["ext"]))
# Append the generated fingerprints to the final list.
total_reactive_fps.append(reactive_fps)
total_non_reactive_fps.append(non_reactive_fps)
# Return all of the generated fingerprints and labels.
return total_reactive_fps, total_non_reactive_fps
|
42c4777dcf9c306cd45f9e94bbf18c0d1768c59b
| 30,891 |
import torch
import torch.nn.functional as F
def count_acc(logits, label):
"""The function to calculate the .
Args:
logits: input logits.
label: ground truth labels.
Return:
The output accuracy.
"""
pred = F.softmax(logits, dim=1).argmax(dim=1)
if torch.cuda.is_available():
return (pred == label).type(torch.cuda.FloatTensor).mean().item()
return (pred == label).type(torch.FloatTensor).mean().item()
|
2f34be0cfb52a438c66b36d1d653ecbd72d559e2
| 30,892 |
def cuda_argmin(a, axis):
""" Location of minimum GPUArray elements.
Parameters:
a (gpu): GPUArray with the elements to find minimum values.
axis (int): The dimension to evaluate through.
Returns:
gpu: Location of minimum values.
Examples:
>>> a = cuda_argmin(cuda_give([[1, 2, 3], [6, 5, 4]]), axis=1)
array([[0],
[2]], dtype=uint32)
>>> type(a)
<class 'pycuda.gpuarray.GPUArray'>
"""
return skcuda.misc.argmin(a, axis, keepdims=True)
|
25231969616e5c14736757a7b13f058ee218b6aa
| 30,893 |
def _process_columns(validated_data, context):
"""Process the used_columns field of a serializer.
Verifies if the column is new or not. If not new, it verifies that it is
compatible with the columns already existing in the workflow
:param validated_data: Object with the parsed column items
:param context: dictionary with additional objects for serialization
:return: List of new columns
"""
new_columns = []
for citem in validated_data:
cname = citem.get('name')
if not cname:
raise Exception(
_('Incorrect column name {0}.').format(cname))
# Search for the column in the workflow columns
col = context['workflow'].columns.filter(name=cname).first()
if not col:
# Accumulate the new columns just in case we have to undo
# the changes
if citem['is_key']:
raise Exception(
_('Action contains non-existing key column "{0}"').format(
cname))
new_columns.append(citem)
continue
# Processing an existing column. Check data type compatibility
is_not_compatible = (
col.data_type != citem.get('data_type')
or col.is_key != citem['is_key']
or set(col.categories) != set(citem['categories'])
)
if is_not_compatible:
# The two columns are different
raise Exception(_(
'Imported column {0} is different from existing '
+ 'one.').format(cname))
# Update the column categories (just in case the new one has a
# different order)
col.set_categories(citem['categories'])
return _create_columns(new_columns, context)
|
cae79dda5e5121d4684e0995034050e9c6c45598
| 30,894 |
def module_of_callable(c):
"""Find name of module where callable is defined
Arguments:
c {Callable} -- Callable to inspect
Returns:
str -- Module name (as for x.__module__ attribute)
"""
# Ordinary function defined with def or lambda:
if type(c).__name__ == 'function':
return c.__module__
# Some other callable, probably an instance of a class with a __call__ method, so return the module of the class declaration rather than the module of instantiation:
return c.__class__.__module__
|
116e46a3e75fcd138e271a3413c62425a9fcec3b
| 30,895 |
def lung_seg(input_shape, num_filters=[16,32,128], padding='same') :
"""Generate CN-Net model to train on CT scan images for lung seg
Arbitrary number of input channels and output classes are supported.
Arguments:
input_shape - (? (number of examples),
input image height (pixels),
input image width (pixels),
input image features (1 for grayscale, 3 for RGB))
num_filters - number of filters (exactly 3 should be passed)
padding - 'same' or 'valid'
Output:
CN-Net model expecting input shape (height, width, channels) and generates
output with the same shape (height, width, channels).
"""
x_input = Input(input_shape)
### LUNG SEGMENTATION
x = Conv2D(num_filters[0], kernel_size=3, activation='relu', padding=padding)(x_input)
x = MaxPooling2D(pool_size=2, padding=padding)(x)
x = Conv2D(num_filters[1], kernel_size=3, activation='relu', padding=padding)(x)
x = MaxPooling2D(pool_size=2, padding=padding)(x)
x = Conv2D(num_filters[2], kernel_size=3, activation='relu', padding=padding)(x)
x = MaxPooling2D(pool_size=2, padding=padding)(x)
x = Dense(num_filters[2], activation='relu')(x)
x = UpSampling2D(size=2)(x)
x = Conv2D(num_filters[2], kernel_size=3, activation='sigmoid', padding=padding)(x)
x = UpSampling2D(size=2)(x)
x = Conv2D(num_filters[1], kernel_size=3, activation='sigmoid', padding=padding)(x)
x = UpSampling2D(size=2)(x)
lung_seg = Conv2D(1, kernel_size=3, activation='sigmoid',
padding=padding)(x) # identifying lungs
lseg = Model(inputs=x_input, outputs=lung_seg, name='lung_seg')
return lseg
|
67cf286122c40e7fa2f87fc1e0a2f57e97777e32
| 30,896 |
def set_cluster_status(event, context):
"""Set the status of a cluster, ie active, inactive, maintainance_mode, etc"""
try:
cluster_status = event['queryStringParameters']['cluster_status']
except:
return {
"statusCode": 500,
"body": {"message": f'Must provide a status variable in uri query string'}
}
try:
cluster_name = event['queryStringParameters']['cluster_name']
except:
return {
"statusCode": 500,
"body": {"message": f'Must provide a cluster_name variable in uri query string'}
}
try:
CLUSTER_TABLE.update_item(
Key={
'id': cluster_name,
},
UpdateExpression="set cluster_status = :r",
ExpressionAttributeValues={
':r': cluster_status
},
ReturnValues="UPDATED_NEW"
)
return {
"statusCode": 200,
"body": {"message": f'Updated cluster status for {cluster_name} to {cluster_status}'}
}
except:
print(f'Failed to update cluster status for {cluster_name}')
return {
"statusCode": 500,
"body": {"message": f'Falied to update cluster status for {cluster_name}'}
}
|
dbb4215c19b8a241d8d353f3567a19eca32190dc
| 30,897 |
import numpy
def rep(x, n):
""" interpolate """
z = numpy.zeros(len(x) * n)
for i in range(len(x)):
for j in range(n):
z[i * n + j] = x[i]
return z
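# Usage sketch: rep(numpy.array([1., 2.]), 3) -> array([1., 1., 1., 2., 2., 2.]),
# the same result numpy.repeat(x, n) would give for a 1-D input.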
|
97c2ba7e48ff365fb6b4cebcee3f753169cd4670
| 30,898 |
def insert_new_datamodel(database: Database, data_model):
"""Insert a new datamodel in the datamodels collection."""
if "_id" in data_model:
del data_model["_id"]
data_model["timestamp"] = iso_timestamp()
return database.datamodels.insert_one(data_model)
|
b841e9e08e269cda60d261857bc8826b6a614814
| 30,899 |
async def infer_scatter_add(
self,
engine,
input: lib.AbstractArray,
dim: xtype.UInt[64],
index: lib.AbstractArray,
src: lib.AbstractArray,
):
"""Infer the return type of primitive `scatter_add`."""
return input
|
1ce75e1e7d79ca89a4b467d96a1eb8c70b75fbee
| 30,900 |
def eintr_retry(exc_type, f, *args, **kwargs):
"""Calls a function. If an error of the given exception type with
interrupted system call (EINTR) occurs calls the function again.
"""
while True:
try:
return f(*args, **kwargs)
except exc_type as exc:
if exc.errno != EINTR:
raise
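# Usage sketch (assumed caller code, not part of the original):
#   data = eintr_retry(OSError, os.read, fd, 4096)
# retries the read whenever it is interrupted by a signal (errno == EINTR).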
|
a4bf9e6ce3539226c1e963e59cce535ac57ac02c
| 30,902 |
def dot2string(dot):
"""Return a string repr of dots."""
return "*" * int(dot)
|
e2822bfe20dab5702ec4052445718398e66d993e
| 30,903 |
from typing import Optional
from typing import Dict
from typing import Any
import random
def generate_visited_place(
user_email: Optional[str] = None,
place_uid: Optional[str] = None,
place_id: Optional[int] = None,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
) -> Dict[str, Any]:
"""
Creates and returns a user visited place, automatically generating
data for any fields that were not specified.
"""
if user_email is None:
user_email = fake.email()
if place_uid is None:
place_uid = f'ymapsbm1://org?oid={random.randint(0, (10 ** 11) - 1)}'
if place_id is None:
place_id = random.randint(0, (10 ** 10) - 1)
if latitude is None:
latitude = float(fake.latitude())
if longitude is None:
longitude = float(fake.longitude())
return locals()
|
e58de94b838512c642ba2a9c23bf87a9e50227bd
| 30,904 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') /( cm.sum(axis=1)[:, np.newaxis] + 1e-16)
# print("Normalized confusion matrix")
else:
pass
# print('Confusion matrix, without normalization')
# print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
|
3054b6ffe04c5bef18657dfd8e00e9f798689533
| 30,906 |
def authenticate_key(api_key):
"""
Authenticate an API key against our database
:param api_key:
:return: authenticated username
"""
user_model = Query()
results = db.search(user_model.api_key == api_key)
if results:
return results[0]["username"]
return False
|
8d6e129e2e234730d629393b1398003eb7fa8361
| 30,907 |
def verify_ospf3_neighbor_number(device,
expected_interface=None,
expected_number=None,
expected_state=None,
extensive=False,
max_time=60,
check_interval=10):
""" Verifies the number of ospf3 neighbors that meets the criteria
Args:
device ('obj'): device to use
expected_interface ('str'): Interface to use
expected_number ('int'): Expected number of matching neighbors
expected_state ('str'): Interface state
extensive('bool'): Flag to differentiate show commands. Defaults to False.
max_time ('int'): Maximum time to keep checking
check_interval ('int'): How often to check
Returns:
Boolean
Raises:
N/A
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
if not extensive:
out = device.parse("show ospf3 neighbor")
else:
out = device.parse("show ospf3 neighbor extensive")
except SchemaEmptyParserError:
timeout.sleep()
continue
# {
# "ospf3-neighbor-information": {
# "ospf3-neighbor": [
# {
# "interface-name": "ge-0/0/0.0",
# "ospf-neighbor-state": "Full",
# },
count = 0
for neighbor in out.q.get_values('ospf3-neighbor'):
# check variables
interface_name = neighbor.get('interface-name', None)
if expected_interface and expected_interface != interface_name:
continue
neighbor_state = neighbor.get('ospf-neighbor-state', None)
if expected_state and expected_state.lower(
) != neighbor_state.lower():
continue
# if all variables exist, count plus 1
count += 1
if count == expected_number:
return True
timeout.sleep()
return False
|
6540fa2425553672cd4b34fe0a112da3333d2c46
| 30,908 |
def create_and_clone_vcs_repo(orgname, reponame, tmpdir, testname=None):
"""
Creates a VCS org
Create a repo in that org
Clones that repo into a subdirectory of tmpdir
Returns the cloned repo directory path
"""
if testname is None:
description = "Created by CMS VCS test library"
else:
description = "Created by CMS %s test" % testname
info("Creating VCS org %s" % orgname)
create_vcs_org(description=description, orgname=orgname)
info("Created VCS org %s" % orgname)
info("Creating VCS repo %s in org %s" % (reponame, orgname))
create_vcs_repo(orgname=orgname, reponame=reponame, description=description)
info("Created VCS repo %s in org %s" % (reponame, orgname))
repodir = clone_vcs_repo(orgname=orgname, reponame=reponame, tmpdir=tmpdir)
return repodir
|
fc6009c8fa4a4d89cc85d14d65178b59cdfe06e0
| 30,909 |
def graph(x, y, xerr=None, yerr=None):
"""Create a ROOT TGraph from array-like input.
Parameters
----------
x, y : float or array-like, shape (n, )
The data positions.
xerr, yerr : float or array-like, shape (n, ), optional
Error bar sizes in the *x* and *y* directions. The default is `None`, in which
case no error bars are added in this direction.
Returns
-------
ROOT.TGraph or ROOT.TGraphErrors
TGraph object created from input arrays.
"""
# Convert input arrays to flattened, double-precision, contiguous arrays
x = np.ravel(x).astype(np.float64, order="C", copy=False)
y = np.ravel(y).astype(np.float64, order="C", copy=False)
if x.size != y.size:
raise ValueError("x and y must be the same size")
# Process x errors
if xerr is not None:
if not np.iterable(xerr):
xerr = np.ones_like(x) * xerr
xerr = np.ravel(xerr).astype(np.float64, order="C", copy=False)
if xerr.size != x.size:
raise ValueError("xerr must be same size as x")
# Process y errors
if yerr is not None:
if not np.iterable(yerr):
yerr = np.ones_like(y) * yerr
yerr = np.ravel(yerr).astype(np.float64, order="C", copy=False)
if yerr.size != y.size:
raise ValueError("yerr must be same size as y")
# Create graph object
if xerr is None and yerr is None:
graph = root.TGraph(x.size, x, y)
elif xerr is not None and yerr is None:
graph = root.TGraphErrors(x.size, x, y, xerr, root.nullptr)
elif xerr is None and yerr is not None:
graph = root.TGraphErrors(x.size, x, y, root.nullptr, yerr)
elif xerr is not None and yerr is not None:
graph = root.TGraphErrors(x.size, x, y, xerr, yerr)
return graph
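# Usage sketch (assumes `import ROOT as root` and `import numpy as np` at module
# level, as the function body implies):
#   g = graph([1, 2, 3], [2.0, 4.1, 6.2], yerr=0.1)  # -> ROOT.TGraphErrors
#   g_plain = graph([1, 2, 3], [2.0, 4.1, 6.2])      # -> ROOT.TGraph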
|
27222a556077e29a0240ae9bbc559639bbe5a041
| 30,910 |
def qUri(x):
"""Resolve URI for librdf."""
return resolve_uri(x, namespaces=RDF_NAMESPACES)
|
4a27ebcd57cd9937311b7086d69a5e819704de9b
| 30,911 |
def uninstall():
"""Uninstaller for pimessage"""
status = 0
try:
shutil.rmtree(data_dir, ignore_errors=True)
except OSError:
print('Error in removing ~/.pimessage')
return 1
# Remove daemon from .profile
try:
_profile = os.path.join(utils.get_home_dir(), '.profile')
with open(_profile, 'r') as fname:
buf = fname.read()
# process buffer
lines = buf.split('\n')
dir_path = os.path.abspath(os.path.dirname(sys.argv[0]))
daemon_line0 = '#start pimessage daemon'
daemon_line1 = dir_path+'/pmdaemon.py &'
daemon_line2 = dir_path+'/pmdaemon.py -f'
lines_to_append = []
for line in lines:
if (line != daemon_line0 and line != daemon_line1 and
line != daemon_line2):
lines_to_append.append(line)
buf = '\n'.join(lines_to_append)
with open(_profile, 'w') as fname:
fname.write(buf)
except Exception as err:
print('Error in handling ~/.profile')
print('%s' % str(err))
status = 1
# Remove pimessage alias from .bashrc
try:
_bashrc = os.path.join(utils.get_home_dir(), '.bashrc')
with open(_bashrc, 'r') as fname:
buf = fname.read()
# process buffer
lines = buf.split('\n')
alias_line0 = '# For PiMessage -- do not delete'
alias_line1 = 'alias pimessage='+dir_path+'/pimessage.py'
lines_to_append = []
for line in lines:
if line != alias_line0 and line != alias_line1:
lines_to_append.append(line)
buf = '\n'.join(lines_to_append)
with open(_bashrc, 'w') as fname:
fname.write(buf)
except Exception as err:
print('Error in handling ~/.bashrc')
print('%s' % str(err))
status = 1
if status != 0:
print('Error removing PiMessage.')
else:
print('PiMessage has been successfully uninstalled.')
return status
|
d4aac428d12304fa72a33ce7634a5b43f52a6ec8
| 30,912 |
import re
def regex(pattern: str) -> Parser:
"""Regex function.
Returns a function that parses the beginning of the
received string with the regular expression pattern.
Parameters
----------
pattern: str
a regular expression string.
Example
-------
>>> from simpleparser import regex
>>> num = regex("([1-9][0-9]*)")
>>> num.exec('2014a')
['2014']
>>> num.exec('abc')
parse error at (0): unexpected abc expecting ([1-9][0-9]*) (by regex)
"""
name: str = f"regex {pattern}"
def f(self: PrimitiveParser, target: str,
position: int = 0) -> ParseResult:
m = re.match(pattern, target[position:])
if m:
return Success([m.group()], position + len(m.group()), name=name)
msg = (f"parse error at ({position}):"
f" unexpected {target[position:position + 5]}"
f" expecting {pattern} (by {self.parser_type})")
return Failure(msg, position, name=name)
return PrimitiveParser(f, pattern)
|
9db2bcff825a8f1a8662c34819796eb07816cd31
| 30,913 |
def get_name_scope_name(name):
"""Returns the input name as a unique `tf.name_scope` name."""
if name and name[-1] == '/':
return name
name = strip_invalid_chars(name)
with tf.name_scope(name) as unique_name:
pass
return unique_name
|
4d745839d824646a0c43c8936428e8638dc267b3
| 30,914 |
def read_and_filter_wfdb(filename, length=None):
"""
Reads and filters a signal in Physionet format and returns a numpy array
:param filename: the name of the file
:param length: the length of the file to return or None for max length
:return: the filtered and cut signal as a numpy array
"""
record = wfdb.rdrecord(filename)
if length is None:
signal = record.p_signal
else:
signal = record.p_signal[:length, :]
filt = get_filter()
signal = filter_signal(signal, filt)
return signal
|
d617a9832d2093bc8f1fd41b24920a668fa50287
| 30,915 |
def true_thresholds_out(true_cde, z_delta, expected_prop):
"""
calculates thresholds for the cde to get desired HPD value
Arguments:
----------
true_cde: numpy array (n, d) array of cde values for a range of y values
conditional on a x value (the row value)
z_delta : float, space between y values (assumed equally spaced y values)
expected_prop : numpy vector (p, ) of proportion of mass values desired
to be contained
Returns:
--------
threshold_mat : numpy array (n, p). For each row, we have the cde
thresholds that would allow expected_prop[j] mass to be contained
above this amount
"""
n_row = true_cde.shape[0]
threshold_mat = -1 * np.ones((n_row, expected_prop.shape[0]))
for r_idx in np.arange(n_row):
threshold_mat[r_idx,:] = _true_thresholds_out(cdes = true_cde[r_idx,:].ravel(),
z_delta = z_delta,
expected_prop = expected_prop)
return threshold_mat
|
14eba4fe24ed0e46df4ea0d6986ed870fb084d9e
| 30,918 |
def query_disc(nside, lon, lat, radius, **kwargs):
"""
Wrapper around healpy.query_disc to deal with old healpy implementation.
nside : int
The nside of the Healpix map.
lon, lat : float
Coordinates of the disk center (converted internally to a unit vector).
radius : float
The radius (in degrees) of the disk
inclusive : bool, optional
If False, return the exact set of pixels whose pixel centers lie
within the disk; if True, return all pixels that overlap with the disk,
and maybe a few more. Default: False
fact : int, optional
Only used when inclusive=True. The overlapping test will be done at
the resolution fact*nside. For NESTED ordering, fact must be a power of 2,
else it can be any positive integer. Default: 4.
nest: bool, optional
if True, assume NESTED pixel ordering, otherwise, RING pixel ordering
"""
vec = ang2vec(lon, lat)
return healpy.query_disc(nside, vec, np.radians(radius), **kwargs)
|
fb987b327861acfa684a6ccabec745996d545e5d
| 30,920 |
def prepare_audio_file(uploaded_file):
"""
A function to prepare the audio file uploaded by the user.
Input: uploaded file passed by st.file_uploader
Output: float32 numpy
"""
# use pydub to quickly prepare audio segment
a = pydub.AudioSegment.from_file(file=uploaded_file, format=uploaded_file.name.split(".")[-1])
# split channel sounds to mono
channel_sounds = a.split_to_mono()
samples = [s.get_array_of_samples() for s in channel_sounds]
# convert to float32 so audiomentations can augment the file
fp_arr = np.array(samples).T.astype(np.float32)
fp_arr /= np.iinfo(samples[0].typecode).max
return fp_arr[:, 0], a.frame_rate
|
688d326891cc0bfc7628cfcc5c636c181e714ad5
| 30,921 |
def get_item(obj,name):
"""Given a attribute item like 'robots[2].links[4].name', evaluates
the value of the item in the object.
Note: not secure! Uses eval()
"""
loc = {'_w': obj}
return eval('_w.' + name, globals(), loc)
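# Usage sketch (the attribute path is hypothetical):
#   get_item(world, "robots[2].links[4].name")
# evaluates world.robots[2].links[4].name via eval(), hence the security caveat.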
|
a802b4cccda11b9a97be6a8bf0fd62024d636a74
| 30,922 |
def generate_bars_dict(H, neg_bars=False):
"""Generate a ground-truth dictionary W suitable for a std. bars test
Creates H basis vectors with horizontal and vertical bars on an R*R pixel grid,
(with R = H // 2). The function thus returns a matrix storing H dictionaries of
size D=R*R.
:param H: Number of latent variables
:type H: int
:param neg_bars: Should half the bars have a negative value (-1)?
:type neg_bars: bool
:rtype: ndarray (D x H)
Source: https://github.com/ml-uol/prosper/blob/master/prosper/utils/barstest.py::
generate_bars_dict
For LICENSING and COPYRIGHT for this function see prosper's license at:
https://github.com/ml-uol/prosper/blob/master/LICENSE.txt
"""
R = H // 2
D = R ** 2
W_gt = np.zeros((R, R, H))
for i in range(R):
W_gt[i, :, i] = 1.0
W_gt[:, i, R + i] = 1.0
if neg_bars:
sign = 1 - 2 * np.random.randint(2, size=(H,))
W_gt = sign[None, None, :] * W_gt
return W_gt.reshape((D, H))
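# Usage sketch: for H = 8 latent variables the grid is R = 4, so
# generate_bars_dict(8).shape == (16, 8), one R*R bar image per column.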
|
2d0f0ff96507d7fb826ea37467c81d16af37a768
| 30,923 |
def add_node(uuid, state, manage_boot=True, **attributes):
"""Store information about a node under introspection.
All existing information about this node is dropped.
Empty values are skipped.
:param uuid: Ironic node UUID
:param state: The initial state of the node
:param manage_boot: whether to manage boot for this node
:param attributes: attributes known about this node (like macs, BMC etc);
also ironic client instance may be passed under 'ironic'
:returns: NodeInfo
"""
started_at = timeutils.utcnow()
with db.ensure_transaction() as session:
_delete_node(uuid)
version_id = uuidutils.generate_uuid()
db.Node(uuid=uuid, state=state, version_id=version_id,
started_at=started_at, manage_boot=manage_boot).save(session)
node_info = NodeInfo(uuid=uuid, state=state, started_at=started_at,
version_id=version_id, manage_boot=manage_boot,
ironic=attributes.pop('ironic', None))
for (name, value) in attributes.items():
if not value:
continue
node_info.add_attribute(name, value, session=session)
return node_info
|
0765177fdc0ec4acc1926fcfcb40f4d8cd96abee
| 30,924 |
def read_from_hdf5(hdfFile,label,dof_map=None):
"""
Grab the array stored in the node with the given label and return it.
If dof_map is not None, use it to index values in the array; the shape of
dof_map then determines the shape of the output array.
"""
assert hdfFile is not None, "requires hdf5 for heavy data"
vals = hdfFile.get_node(label).read()
if dof_map is not None:
dof = vals[dof_map]
else:
dof = vals
return dof
|
92aa7da786e893d6477c2bcb3c6e3988cbc33558
| 30,925 |
def copy_planning_problem(planning_problem):
"""
Make a copy of a planning problem.
Parameters:
planning_problem (PlanningProblem): A planning problem.
Returns:
(PlanningProblem): A copy of the given planning problem.
"""
copy = PlanningProblem(
initial=planning_problem.initial,
goals=planning_problem.goals,
actions=planning_problem.actions)
return copy
|
03773086f067626c69eb44a319a48cd5c05dad27
| 30,926 |
def remove_pc(x, npc=1):
"""
Remove the projection on the principal components
:param x: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(x, npc)
if npc == 1:
xx = x - x.dot(pc.transpose()) * pc
else:
xx = x - x.dot(pc.transpose()).dot(pc)
return xx
|
797d538236f108a1710041c25aea3e88cc202c5f
| 30,927 |
import json
def invocation_parameter(s) :
"""argparse parameter conversion function for invocation request
parameters, basically these parameters are JSON expressions
"""
try:
expr = json.loads(s)
return expr
except (ValueError, TypeError):
return str(s)
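# Usage sketch: invocation_parameter('{"a": 1}') -> {'a': 1}, while a value that
# is not valid JSON, e.g. invocation_parameter('not-json'), falls back to the string 'not-json'.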
|
cca1a9c3514def152295b10b17ef44480ccca5a9
| 30,928 |
def build_bhp(bhm, dt_bin_edges, num_fissions = None,
pair_is = 'all', type_is = 'all', print_flag = False):
"""
Build the bicorr_hist_plot by selecting events from bhm and applying normalization factor. The normalization factor is only applied if norm_factor is provided. If not, norm_factor remains at default value 1 and the units are in number of counts.
Parameters
----------
bhm : ndarray
Master histogram of bicorrelation histograms across all detector pairs and interaction types.
Dimension 0: detector pair, use dictionary `dict_pair_to_index` where pair is (100*det1ch+det2ch)
Dimension 1: interaction type, length 4. (0=nn, 1=np, 2=pn, 3=pp)
Dimension 2: dt bin for detector 1
Dimension 3: dt bin for detector 2
dt_bin_edges : ndarray
One-dimensional array of time bin edges
num_fissions : float, optional
Number of fissions for normalization. If provided, then proceed with normalization. If not provided, then no normalization performed
pair_is : list, optional
Indices of selected detector pairs in bhm
type_is : list, optional
Indices of selected interaction types in bhm (0=nn, 1=np, 2=pn, 3=pp)
print_flag : bool, optional
If set to True, print out some information about array size and selections
Returns
-------
bicorr_hist_plot : ndarray
Array to plot. Two-dimensional with axes sizes corresponding to dt_bin_edges x dt_bin_edges.
norm_factor : float
Normalization factor to translate to counts per fission-pair
"""
if print_flag:
print('Creating bicorr_hist_plot for...')
print('pair_is = ',pair_is)
print('type_is = ',type_is)
# If plotting all indices for pair_is or type_is, generate those indices
if isinstance(pair_is, str) and pair_is == 'all':
pair_is = np.arange(0, bhm.shape[0])
if isinstance(type_is, str) and type_is == 'all':
type_is = np.arange(0, bhm.shape[1])
# If normalizing, calculate normalization factor
if num_fissions is None:
norm_factor = 1 # No normalization
else: # Normalize by number fissions, detector pairs, and time bin size
norm_factor = num_fissions * len(pair_is) * np.power((dt_bin_edges[1]-dt_bin_edges[0]),2)
# Produce bicorr_hist_plot
bicorr_hist_plot = np.sum(bhm[pair_is,:,:,:][:,type_is,:,:],axis=(0,1)) / norm_factor
if print_flag:
print('time bin width (ns) = ', (dt_bin_edges[1]-dt_bin_edges[0]))
print('length of pair_is = ', len(pair_is))
print('norm_factor = ',norm_factor)
return bicorr_hist_plot, norm_factor
|
083a959755b0c81567af236226c7a321fefdb8b9
| 30,929 |
def env_builder(env_name, env_info, **kwargs):
"""
the interface func for creating environment
:param env_name:the name of environment
:param env_info: the config info of environment
:return:environment instance
"""
return Registers.env[env_name](env_info, **kwargs)
|
ca5f8267bfac46407cd39e3464ff95d765274634
| 30,930 |
def filter_functions(input_set, filter_set):
"""
Keeps only the entries of input_set whose keys appear (case-insensitively) in filter_set
:param input_set:
:param filter_set:
:return:
"""
ns = {}
filter_low = {x.lower() for x in filter_set}
for x in input_set:
xl = x.lower()
if xl in filter_low:
ns[x] = input_set[x]
return ns
|
cabc321b6730df4a0c7987b83c6a2c3d6fb69c02
| 30,932 |
from .scattering1d.filter_bank import morlet_1d, gauss_1d
from scipy.fft import ifftshift, ifft
def make_jtfs_pair(N, pair='up', xi0=4, sigma0=1.35):
"""Creates a 2D JTFS wavelet. Used in `wavespin.visuals`."""
morl = morlet_1d(N, xi=xi0/N, sigma=sigma0/N).squeeze()
gaus = gauss_1d(N, sigma=sigma0/N).squeeze()
if pair in ('up', 'dn'):
i0, i1 = 0, 0
elif pair == 'phi_f':
i0, i1 = 1, 0
elif pair in ('phi_t', 'phi_t_dn'):
i0, i1 = 0, 1
elif pair == 'phi':
i0, i1 = 1, 1
else:
supported = {'up', 'dn', 'phi_f', 'phi_t', 'phi', 'phi_t_dn'}
raise ValueError("unknown pair %s; supported are %s" % (
pair, '\n'.join(supported)))
pf_f = (morl, gaus)[i0]
pt_f = (morl, gaus)[i1]
pf_f, pt_f = pf_f.copy(), pt_f.copy()
if pair in ('dn', 'phi_t_dn'):
# time reversal
pf_f[1:] = pf_f[1:][::-1]
pf, pt = [ifftshift(ifft(p)) for p in (pf_f, pt_f)]
Psi = pf[:, None] * pt[None]
return Psi
|
9a3e9c5d5a4a81c62f1ac3a5c7b0d5922eefdb78
| 30,933 |
def convert(your_text):
"""
Changes foot-notes into numbered foot-notes
Args:
your_text (str): a certain text
Returns:
str
"""
# print(f"Hello world, I plan to convert the following text: {your_text}")
### Terminology
# Given a four-line text that looks like the (somewhat mal-formed)
# following:
# ===
# Hola mundo [^1]. Hello world. [^2].
#
# [^a]: Spanish
# [^b]: English
# ===
# I refer to [^1] and [^2] as Pre Footnotes, and
# [^a] and [^b] as Post Footnotes
pre = h.getAllPreFootnotes(your_text)
post = h.getAllPostFootnotes(your_text)
h.ensureAllUnique(pre) # make sure no duplicates occur
h.ensureAllUnique(post)
h.ensureAllPreHasCounterpartAmongPostFootnotes(pre, post) # defensive coding
a_map = h.mapFootnotesToNumbers(pre) # I suppose either Pre Footnotes or Post Footnotes would work as an argument, given the checks above
converted_text = h.replaceFootnotesWithNumbers(your_text, a_map)
return converted_text
|
82f926a3366ff29d65160ac9d09f5e72cf37b8cd
| 30,934 |
def categorical(cum_weights):
"""
Sample from a discrete distribution
:param cum_weights: list of cumulative sums of probabilities (should satisfy cum_weights[-1] = 1)
:return: sampled integer from `range(0,len(cum_weights))`
"""
p = _rand.random()
i = 0
while p > cum_weights[i]:
i += 1
return i
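# Usage sketch: cum_weights = [0.2, 0.7, 1.0] corresponds to probabilities
# (0.2, 0.5, 0.3) over {0, 1, 2}; categorical(cum_weights) returns one of those indices.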
|
c14009b98d5f683fc92cdeed64db0ca8276cd868
| 30,935 |
def floodfill(image, *args, **kwargs):
"""
Performs a floodfill on the given image
:Parameters:
image : `Image` or `numpy.array` or `basestring`
The image object we would like to operate on as a numpy array, url, filepath, or image object
keycolor : `tuple`
The color to key out in the floodfill. If `None` specified, this is detected
automatically. Default=None
channel : `basestring`
The channel we wish to be replaced with `replacevalue` for all pixels contiguous
with the seeded pixel. Valid values are 'r','g','b', and 'a'. Default='a'
replacevalue : `float`
The value to replace the given channel with for the pixels that match the keycolor
and are contiguous with the seeded pixel. Default=0.0
tol : `float`
The threshold tolerance to allow in the difference of squares between pixels contiguous with the
seed pixel and the key color. The the difference of squared norms exceeds tol, the pixel is not
treated as equal for the purpose of floodfilling. Default=.04
seedpixel : `tuple`
Any iterable of (row, col) indices for where to begin the floodfill. Note that this is assumed
to be the same as keycolor. If not, the floodfill may end up being a no-op.
If seedpixel is None, the seed is determined procedurally by spiraling inward clockwise from the
upper righthand pixel of the image. Default=None
:Returns:
A copy of the image after floodfill has been performed
:Rtype:
`Image`
"""
image = Image.from_any(image)
return FloodfillOperation(image, *args, **kwargs).run()
|
1f8ce1bd87c3ecbd36b32524676208357ccefa78
| 30,936 |
def get_words_per_sentence(content):
"""
Get words per sentance average
:content: str
:returns: int
"""
words = get_words(content)
sentences = get_sentences(content)
return words / sentences
|
b3b4e8858f531723fee97d1cc3ba18c85ff16f92
| 30,937 |
def timeout_soft_cmd(cmd, timeout):
"""Same as timeout_cmd buf using SIGTERM on timeout."""
if not timeout:
return cmd
return 'timeout %us stdbuf -o0 -e0 %s' % (timeout, cmd)
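# Usage sketch: timeout_soft_cmd('ping -c 1 host', 5)
#   -> 'timeout 5s stdbuf -o0 -e0 ping -c 1 host'
# A timeout of 0 (or None) returns the command unchanged.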
|
79491bf29a80678381e06ee1e9fe1feda858faf2
| 30,938 |
from typing import List
from typing import Tuple
def get_all_links_and_port_names_of_equipment(
client: SymphonyClient, equipment: Equipment
) -> List[Tuple[Link, str]]:
"""Returns all links and port names in equipment.
Args:
equipment ( `pyinventory.common.data_class.Equipment` ): could be retrieved from
- `pyinventory.api.equipment.get_equipment`
- `pyinventory.api.equipment.get_equipment_in_position`
- `pyinventory.api.equipment.add_equipment`
- `pyinventory.api.equipment.add_equipment_to_position`
Returns:
List[Tuple[ `pyinventory.common.data_class.Link` , str]]:
- `pyinventory.common.data_class.Link` - link object
- str - port definition name
Raises:
`pyinventory.exceptions.EntityNotFoundError`: if link not found
FailedOperationException: for internal inventory error
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
equipment = client.get_equipment(name="indProdCpy1_AIO", location=location1)
client.get_all_links_and_port_names_of_equipment(equipment=equipment)
```
"""
equipment_data = EquipmentPortsQuery.execute(client, id=equipment.id)
if equipment_data is None:
raise EntityNotFoundError(entity=Entity.Equipment, entity_id=equipment.id)
ports = equipment_data.ports
result = []
for port in ports:
port_link = port.link
if port_link is not None:
link = Link(
id=port_link.id,
properties=port_link.properties,
service_ids=[s.id for s in port_link.services if port_link.services]
if port_link.services is not None
else [],
)
result.append((link, port.definition.name))
return result
|
a1cbf02744630fa4e7e72799f351e4befdb5737e
| 30,939 |
def get_model_input(image):
"""Function to perform the preprocessing required so that it can be passed to the model for prediction
Args:
image (PIL image object): image that has to be transformed
Returns:
tensor (4D torch tensor): transformed image to torch 4D tensor (B, C, H, W)
"""
transformer = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.476, 0.452, 0.392],
std=[0.235, 0.231, 0.229])])
image = transformer(image)
image = image.unsqueeze(0)
return image
|
cad77ab6c43a486838748cd1e8be39561171ac58
| 30,940 |
def extract_api_tree():
"""
Generates a tree of command group names and function
signatures in leaves from API function names in Master
"""
api_funcs = {}
for i in dir(session['master'])+dir(public_api(None)):
if (i.startswith('api_') or i.startswith('admin_api_') or
i.startswith('public_api_')):
is_public = False
# Get function names and signatures
if i.startswith('api_'):
func_str = i[4:]
elif i.startswith('admin_'):
func_str = i[10:]
else: # is public api
func_str = i[11:]
is_public = True
cmd_groups = func_str.split('_')
func_sig = session['master'].get_api_signature(
i) if not is_public else public_api(None).get_api_signature(i)
func_doc = session['master'].get_api_doc(
i) if not is_public else public_api(None).get_api_doc(i)
# Build hierarchy of command groups
api_root = api_funcs
for j in cmd_groups:
if (j not in api_root.keys()):
api_root[j] = {}
api_root = api_root[j]
api_root['leaf'] = [i, func_sig, is_public, func_doc]
return api_funcs
|
c5fe7aacf5a4b6aad1e187307662d5d7f8601b98
| 30,941 |
def dataset_names_csv():
"""Returns the expected dataset names included in the tool."""
return resource_loader("dataset_names.csv")
|
cbb038d08982327a99657e1820136386ed3da4e4
| 30,943 |
def is_subtree(cls_name):
"""Determine whether 'cls_name' is a subtree."""
if cls_name == "SequentialCell":
return True
if cls_name in _ms_common_ns or cls_name in _ms_nn_ns or cls_name in _ms_ops_ns:
return False
return True
|
46728b25d9098f6561861fa1f182f53567d5ece9
| 30,944 |
import re
def relevancy_to_adjust(relevancy):
"""
Convert the old test case relevancy into adjust rules
Expects a string or list of strings with relevancy rules.
Returns a list of dictionaries with adjust rules.
"""
rules = list()
rule = dict()
if isinstance(relevancy, list):
relevancy = '\n'.join(str(line) for line in relevancy)
for line in re.split(r'\s*\n\s*', relevancy.strip()):
# Extract possible comments
try:
line, rule['because'] = re.search(RELEVANCY_COMMENT, line).groups()
except Exception:
pass
# Nothing to do with empty lines
if not line:
continue
# Split rule
try:
condition, decision = re.search(RELEVANCY_RULE, line).groups()
except Exception:
raise tmt.utils.ConvertError(
f"Invalid test case relevancy rule '{line}'.")
# Handle the decision
if decision.lower() == 'false':
rule['enabled'] = False
else:
try:
rule['environment'] = tmt.utils.shell_to_dict(decision)
except tmt.utils.GeneralError:
raise tmt.utils.ConvertError(
f"Invalid test case relevancy decision '{decision}'.")
# Adjust condition syntax
expressions = list()
for expression in re.split(r'\s*&&?\s*', condition):
try:
left, operator, right = re.match(
RELEVANCY_EXPRESSION, expression).groups()
except Exception:
raise tmt.utils.ConvertError(
f"Invalid test case relevancy expression '{expression}'.")
# Always use double == for equality comparison
if operator == '=':
operator = '=='
# Basic operators
if operator in ['==', '!=', '<', '<=', '>', '>=']:
# Use the special comparison for product and distro
# when the definition specifies a minor version
if left in ['distro', 'product'] and '.' in right:
operator = '~' + ('=' if operator == '==' else operator)
# Special operators
else:
try:
operator = {
'contains': '==',
'!contains': '!=',
'defined': 'is defined',
'!defined': 'is not defined',
}[operator]
except KeyError:
raise tmt.utils.ConvertError(
f"Invalid test case relevancy operator '{operator}'.")
# Special handling for the '!=' operator with comma-separated
# values (in relevancy this was treated as 'no value equals')
values = re.split(r'\s*,\s*', right)
if operator == '!=' and len(values) > 1:
for value in values:
expressions.append(f"{left} != {value}")
continue
# Join 'left operator right' with spaces
expressions.append(
' '.join([item for item in [left, operator, right] if item]))
# Finish the rule definition
rule['when'] = ' and '.join(expressions)
rule['continue'] = False
rules.append(rule)
rule = dict()
return rules
|
960be0ef1fa0cce57dcd039e95fb031383ab25b5
| 30,945 |
def create_bcs(dim, H, Hmin, inlet_velocity, inlet_velocityOil,
V_0, solutes, subdomains_file, WaterOilInlet,
concentration_left,
interface_thickness,
enable_NS, enable_PF, enable_EC,
mesh, boundaries_Facet, contact_angle, **namespace):
""" The boundaries and boundary conditions are defined here. """
mvc = df.MeshValueCollection("size_t", mesh, dim-1)
with df.XDMFFile(subdomains_file) as infile:
infile.read(mvc, "name_to_read")
facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)
# Re-create boundaries with facet_domain for mesh relevance
boundaries = dict(
inlet = [facet_domains, boundaries_Facet["inlet"]],
inletT = [facet_domains, boundaries_Facet["inletT"]],
inletB = [facet_domains, boundaries_Facet["inletB"]],
outlet = [facet_domains, boundaries_Facet["outlet"]],
wallLR = [facet_domains, boundaries_Facet["wallLR"]],
wallRL = [facet_domains, boundaries_Facet["wallRL"]]
)
# Allocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
for boundary in boundaries:
bcs[boundary] = dict()
### Velocity has 3 inlets in this example due
# to the flow focusing pinching aspect
## Velocity Phase Flow In
#length inlet, water inflow, X or Y, Positive/neg flow along axis
if not WaterOilInlet == 2:
velocity_expr = velocity_init(H[0], inlet_velocity, 0, 1, Hmin[0])
velocity_in = Fixed(velocity_expr)
if enable_NS:
bcs["inlet"]["u"] = velocity_in
if WaterOilInlet == 1:
bcs["inletT"]["u"] = NoSlip()
bcs["inletB"]["u"] = NoSlip()
## Velocity Top In
#length inlet, water inflow, X or Y, Positive/neg flow along axis
if not WaterOilInlet == 1:
velocity_expr = velocity_init(H[1], inlet_velocityOil, 1, -1, Hmin[1])
velocity_inT = Fixed(velocity_expr)
## Velocity Bottom In
#length inlet, water inflow, X or Y, Positive/neg flow along axis
if not WaterOilInlet == 1:
velocity_expr = velocity_init(H[1], inlet_velocityOil, 1, 1, Hmin[1])
velocity_inB = Fixed(velocity_expr)
if enable_NS:
bcs["inletT"]["u"] = velocity_inT
bcs["inletB"]["u"] = velocity_inB
if WaterOilInlet == 2:
bcs["inlet"]["u"] = NoSlip()
pressure_out = Pressure(0.0)
noslip = NoSlip()
V_left = Fixed(V_0)
V_right = Fixed(0.)
## Define boundaries
# Note we have one outlet and two sets of walls
# from experience (FEniCS), opposite boundaries can
# behave badly when all grouped
if enable_NS:
bcs["outlet"]["p"] = pressure_out
bcs["wallLR"]["u"] = noslip
bcs["wallRL"]["u"] = noslip
# Phase field uses a `tanh` expression which defines the PF drop-off
if enable_PF:
phi_expr = df.Expression(
"tanh((abs((x[1]-Hmin)-H/2)-H/16)/(sqrt(2)*eps))",
H=H[0], Hmin = Hmin[0], eps=interface_thickness,
degree=2)
phi_inlet = Fixed(phi_expr)
## PF Fixed across boundary
# as no water can enter oil inlet
# and vice-versa
bcs["inlet"]["phi"] = Fixed(df.Constant(-1.))
bcs["inletT"]["phi"] = Fixed(df.Constant(1.))
bcs["inletB"]["phi"] = Fixed(df.Constant(1.))
## Add contact angle to NS No-Slip Boundaries
bcs["wallLR"]["phi"] = ContactAngle(contact_angle)
bcs["wallRL"]["phi"] = ContactAngle(contact_angle)
return boundaries, bcs, bcs_pointwise
|
d69db7ec4adbcddbaa3f6ffd95a2cff9333fd647
| 30,946 |
def get_parameter_values(parameter, Grid, selectedmodels, noofind):
"""
Get parameter values from grid
Parameters
----------
parameter : str
Grid, hdf5 object
selectedmodels :
models to return
noofind :
number of parameter values
Returns
-------
x_all : array
parameter values
"""
x_all = np.zeros(noofind)
i = 0
for modelpath in selectedmodels:
N = len(selectedmodels[modelpath].logPDF)
try:
x_all[i : i + N] = selectedmodels[modelpath].paramvalues[parameter]
except Exception:
x_all[i : i + N] = Grid[modelpath + "/" + parameter][
selectedmodels[modelpath].index
]
i += N
return x_all
|
5e5dbfe3d810cb40e55c9e8cc1b6f33841212584
| 30,947 |
def dumpj(game):
"""Dump a game to json"""
return game.to_json()
|
bac5480ea2b3136cbd18d0690af27f94e4a2b6a3
| 30,948 |
def __add_statement2issue(statement_uid: int, issue_uid: int) -> StatementToIssue:
"""
Adds a new statement to issue link to the database
:param statement_uid: id of the related statement
:param issue_uid: id of the related issue
:return: New statement to issue object
"""
db_statement2issue = StatementToIssue(statement=statement_uid, issue=issue_uid)
DBDiscussionSession.add(db_statement2issue)
DBDiscussionSession.flush()
return db_statement2issue
|
415ab377f048a6f0a490606853e38c57b2d00e45
| 30,949 |
def CreateInnerMaskBmapFromOuterMask( srcBmap ) :
"""
Derive the inner mask wxBitmap from the Outer mask wxBitmap.
The srcBmap must be "well behaved" in that a continuous border
must present so that a floodfill to the perimeter area will not reach
into the inner area. The border color must be >=128. So,
the srcBmap consists of a transparent/BLACK perimeter, an white/opaque
frame border and a transparent/BLACK inner area.
When completed, the outer_area+border will be transparent/BLACK,
the parent's frame border will be transparent/BLACK and the inner area
will be opaque/WHITE.
1. outer perimeter (black) --> Floodfill to white/255
Now both perimeter and border are white).
2. Invert the image and return it as a wxBitmap.
"""
# Start with an 'L' Pil copy of the RGB input wxBitmap.
dstPilImage = ImgConv.PilImageFromWxBitmap( srcBmap ).convert( 'L' )
# Make sure the image is quantized to binary.
dstPilImage = dstPilImage.point(lambda i: (i / 128) * 255)
size = dstPilImage.size
ImageDraw.floodfill( dstPilImage, (0, 0), (255) )
return ImgConv.WxBitmapFromPilImage( ImageChops.invert( dstPilImage ) )
|
76b17d0d3d252bf316ba42c77e3b203f8e80a474
| 30,950 |
def generate_fake_example(w: int, h: int, identifier: int):
"""Generate a random COCO example."""
num_objects = 8
return {
'image': np.random.randint(0, 256, size=(w, h, 3), dtype=np.uint8),
'image/filename': f'{identifier:012}.jpg',
'image/id': identifier,
'objects': {
'area': np.arange(num_objects, dtype=np.int64) * 50,
'bbox': np.stack([np.array([0., 0., 1., 1.])] * num_objects),
'id': np.arange(num_objects),
'is_crowd': np.full((num_objects,), False, dtype=bool),
'label': np.random.randint(
0, 81, size=(num_objects,), dtype=np.int32),
}
}
|
38256e8de420a1a81875138936331e7fb914e72c
| 30,951 |
from ..learn.optim import SGDW
from ..learn.optim import NesterovSGD
from ..learn.optim import NesterovSGDW
from ..learn.optim import AMSGrad
import torch
def get_optim(name: str):
""" get an optimizer by name """
if name.lower() == 'adam':
optimizer = torch.optim.Adam
elif name.lower() == 'adamw':
optimizer = torch.optim.AdamW
elif name.lower() == 'sgd':
optimizer = torch.optim.SGD
elif name.lower() == 'sgdw':
optimizer = SGDW
elif name.lower() == 'nsgd':
optimizer = NesterovSGD
elif name.lower() == 'nsgdw':
optimizer = NesterovSGDW
elif name.lower() == 'rmsprop':
optimizer = torch.optim.RMSprop
elif name.lower() == 'adagrad':
optimizer = torch.optim.Adagrad
elif name.lower() == 'amsgrad':
optimizer = AMSGrad
else:
raise SynthtorchError(f'Optimizer: "{name}" not a valid optimizer routine or not supported.')
return optimizer
|
22c77840cffc8f6d5bd89bd190c8b912728cf6fa
| 30,952 |
import types
def _get_functions_names(module):
"""Get names of the functions in the current module"""
return [name for name in dir(module) if
isinstance(getattr(module, name, None), types.FunctionType)]
|
581384740dc27c15ac9710d66e9b0f897c906b96
| 30,953 |
import ast
def ex_rvalue(name):
"""A variable store expression."""
return ast.Name(name, ast.Load())
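# Usage sketch: on recent Pythons, ast.dump(ex_rvalue('x')) -> "Name(id='x', ctx=Load())".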
|
4afff97283d96fd29740de5b7a97ef64aad66efe
| 30,954 |
def roll(input, shifts, dims=None):
"""Roll elements along the given dimension.
:attr:`dims` could be negative or ``None``:
```python
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
# A negative dimension is the last-k dimension
print(torch.roll(x, shifts=1, dims=1)) # [[3, 1, 2], [6, 4, 5]]
print(torch.roll(x, shifts=1, dims=-1)) # Equivalent
# If dimension is None, roll input as a vector
print(torch.roll(x, shifts=1)) # [[6, 1, 2], [3, 4, 5]]
# Also, dimension could be a sequence of integers
print(torch.roll(x, shifts=(1, 1), dims=(0, 1))) # [[6, 4, 5], [3, 1, 2]]
print(torch.roll(x, shifts=(1, -1), dims=(0, 1))) # [[5, 6, 4], [2, 3, 1]]
```
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
shifts : Union[int, Sequence[int]]
The rolling offset of each dimension.
dims : Union[int, Sequence[int]], optional
The dimension to roll.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
"""
shifts = nest.flatten(shifts)
dims = nest.flatten(dims) if dims is not None else dims
return Function.apply(
'Roll', input.device, [input],
num_shifts=len(shifts), shifts=shifts, axes=dims)
|
9ad3f85a3313be66fed358894d9fd105aa3c1c32
| 30,955 |
def gsl_eigen_genhermv_free(*args, **kwargs):
"""gsl_eigen_genhermv_free(gsl_eigen_genhermv_workspace w)"""
return _gslwrap.gsl_eigen_genhermv_free(*args, **kwargs)
|
738832672277da7d6b8780a501aff36d740e3eba
| 30,957 |
def model3():
"""
PyMC configuration with Model 3.
alpha vs theta[0]*beta + theta[1]
"""
# Priors
theta = mc.Uniform('theta',
lower=[-10.0, -10.0],
upper=[ 10.0, 10.0],
value=[ 0.1, 0.1])
sigma = mc.Uniform('sigma', lower=0., upper=100., value=1.)
# Model
@mc.deterministic
def y_mean(theta=theta,
beta = data_evac.data_alphabeta['beta']):
return theta[0]*beta + theta[1]
# Likelihood
# The likelihood is N(y_mean, sigma^2), where sigma
# is pulled from a uniform distribution.
y_obs = mc.Normal('y_obs',
value=data_evac.data_alphabeta['alpha'],
mu=y_mean,
tau=sigma**-2,
observed=True)
return vars()
|
c79aa2edbd1cff864a48b5476106afaf49d74975
| 30,959 |
def getVTGui():
"""Small wrapper to hide the fact that vtapp object contains gui.
:return: main window object
"""
return vtutils.getVTApp().gui
|
08ac978c35a6530cddc154ef1df925ea339559f3
| 30,960 |
def ksd_parametric_custom(ksd_value, alpha, B_parametric):
"""
Compute KSD test using a parametric bootstrap with kernel matrix
inputs: ksd_value: scalar KSD value computed
for inputs X and score_X
alpha: real number in (0,1) (level of the test)
B_parametric: (N, B) array of ksd values computed with
the reference bandwidth using samples from the model
output: result of KSD test (1 for "REJECT H_0" and 0 for "FAIL TO REJECT H_0")
"""
B = B_parametric.shape[0]
if ksd_value > B_parametric[int(np.ceil(B * (1 - alpha))) - 1]:
return 1
return 0
|
4c4cf6ae1edde41cb519bf7459950f1f1dd42c8a
| 30,961 |
def freehand(img, depth=10., el=np.pi / 2.2, az=np.pi / 4):
"""
Generate a hand-drawn (pencil sketch) style image.
:param img: input image
:param depth: depth effect, in the range 0-100
:param el: elevation (overhead) angle of the light source, in radians
:param az: azimuth angle of the light source, in radians
:return: grayscale sketch with values in [0, 1]
"""
img = rgb2grey(img)
img = img * 255 if np.max(img) <= 1.1 else img
grad = np.gradient(img)  # gradient of the image grayscale values
grad_x, grad_y = grad  # horizontal and vertical gradient components
grad_x = grad_x * depth / 100.
grad_y = grad_y * depth / 100.
A = np.sqrt(grad_x ** 2 + grad_y ** 2 + 1.)
uni_x = grad_x / A
uni_y = grad_y / A
uni_z = 1. / A
dx = np.cos(el) * np.cos(az)  # light source effect on the x axis
dy = np.cos(el) * np.sin(az)  # light source effect on the y axis
dz = np.sin(el)  # light source effect on the z axis
gd = (dx * uni_x + dy * uni_y + dz * uni_z)  # combine with the normalized light source
gd = gd.clip(0, 1)  # clip the generated grayscale values to [0, 1] to avoid overflow
return gd
|
06874581c622a671ed6e0cbb3a33dfed020902f5
| 30,962 |
def batch_compute_ic(trajs, params, *, weights=None, method="fft"):
"""Compute a batch of integrated correlated matrices.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
params : 1d array-like or list of 1d array-like
If a 1d array-like, list of VAC lag times.
If a list of 1d array-like, list of IVAC lag times.
weights : int or list of (n_frames[i],) ndarray
Weight of trajectory starting at each configuration.
If int, assume uniform weights except for the last int frames,
which have zero weight.
method : str, optional
Method to compute integrated correlation matrices. Must be
'direct', 'fft', or 'fft-all'. Methods 'direct' and 'fft'
compute the matrices one by one using the compute_ic function.
Method 'fft-all' computes all of the correlation matrices at
once using a FFT convolution between each pair of features.
Returns
-------
iterable of (n_features, n_features) ndarray
Iterable of integrated correlation matrices.
"""
if np.asarray(params[0]).ndim == 0:
params = np.asarray(params)
assert params.ndim == 1
flag = True
else:
params = [np.asarray(param) for param in params]
assert np.all([param.ndim == 1 for param in params])
flag = False
if method == "fft-all":
if weights is None:
if flag:
return batch_ct_all(trajs, params)
else:
return batch_ic_all(trajs, params)
else:
if flag:
return batch_ct_rt(trajs, params, weights)
else:
return batch_ic_rt(trajs, params, weights)
# compute each matrix one by one
return (
compute_ic(trajs, param, weights=weights, method=method)
for param in params
)
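# Hedged usage sketch: random features stand in for real trajectory data, and
# compute_ic / batch_* helpers are assumed to come from the surrounding module.
trajs = [np.random.randn(1000, 5) for _ in range(4)]
lags = [1, 2, 5, 10]  # plain VAC lag times -> one matrix per lag
ics = list(batch_compute_ic(trajs, lags, method="fft"))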
|
c6a931820eb1d17477143e615b942551e1c6369f
| 30,963 |
import numpy as np
def genTrainFeatures(dimension=128):
"""
Input:
dimension: desired dimension of the features
Output:
X: n feature vectors of dimensionality d (nxd)
Y: n labels (-1 = girl, +1 = boy) (n)
"""
# Load in the data
Xgirls = name2features("data/girls.train", B=dimension)
Xboys = name2features("data/boys.train", B=dimension)
X = np.concatenate([Xgirls, Xboys])
# Generate Labels
Y = np.concatenate([-np.ones(len(Xgirls)), np.ones(len(Xboys))])
# shuffle data into random order
ii = np.random.permutation([i for i in range(len(Y))])
return X[ii, :], Y[ii]
|
f5c9838f81caf1cc9641de450e6eb9a844e47ccd
| 30,965 |
def stateDiff(start, end):
"""Calculate time difference between two states."""
consumed = (end.getTimestamp() - start.getTimestamp()).total_seconds()
return consumed
|
1f76903e2486e2c378f338143461d1d15f7993a6
| 30,966 |
import copy
import random
def staticDepthLimit(max_depth):
"""Implement a static limit on the depth of a GP tree, as defined by Koza
in [Koza1989]. It may be used to decorate both crossover and mutation
operators. When an invalid (too high) child is generated, it is simply
replaced by one of its parents.
    This operator can be used to avoid memory errors occurring when the tree
gets higher than 90-95 levels (as Python puts a limit on the call stack
depth), because it ensures that no tree higher than *max_depth* will ever
be accepted in the population (except if it was generated at initialization
time).
:param max_depth: The maximum depth allowed for an individual.
:returns: A decorator that can be applied to a GP operator using \
:func:`~deap.base.Toolbox.decorate`
.. note::
If you want to reproduce the exact behavior intended by Koza, set
the *max_depth* param to 17.
.. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
Computers by Means of Natural Selection (MIT Press,
Cambridge, MA, 1992)
"""
def decorator(func):
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
if ind.height > max_depth:
new_inds[i] = random.choice(keep_inds)
return new_inds
return wrapper
return decorator
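# Hedged usage sketch with DEAP: 'toolbox' is assumed to be a deap.base.Toolbox
# with 'mate' and 'mutate' already registered for GP individuals.
toolbox.decorate("mate", staticDepthLimit(max_depth=17))
toolbox.decorate("mutate", staticDepthLimit(max_depth=17))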
|
cdcb1e58a681b622ced58e9aa36562e1fedb6083
| 30,967 |
def rotate(coor, alpha, beta, gamma):
"""Rotate 'coor' by the angles alpha, beta, gamma.
"""
R1 = getD(alpha)
R2 = getC(beta)
R3 = getD(gamma)
M = R3 * R2 * R1
return np.dot(coor, M)
|
fc7348dfd65012239841949da82a188299305e97
| 30,968 |
import requests
import cfscrape
def get_content(url):
    """Get URL content, falling back to cfscrape if Cloudflare returns 503."""
request = requests.get(url)
content = request.text
if request.status_code == 503:
scraper = cfscrape.create_scraper()
content = scraper.get(url).content
return content
|
4f6db08c2cc393f2ac6a1447f6bd9a44535de12a
| 30,970 |
import re
def titlecase(string):
"""Turn string of words into titlecased words.
:type string: str
:param string: A string of words.
"""
return re.sub(
r"[A-Za-z]+('[A-Za-z]+)?",
lambda mo: mo.group(0)[0].upper() + mo.group(0)[1:].lower(),
string,
)
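# Example: internal apostrophes are preserved while each word is titlecased.
print(titlecase("they're bill's friends from the UK"))  # They're Bill's Friends From The Uk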
|
77976d2ccad5b6b924b76d587a6883cf660497d0
| 30,971 |
import re
import datetime
import bs4
def youdao_definition(wrapped_word):
"""
get word meaning from youdao.com, thanks for their great work.
"""
word = wrapped_word[:-2].strip()
url = constants.YOUDAO_URL_PREFIX + word
content = utility.get_content_of_url(url)
soup = bs4.BeautifulSoup(markup=content, features='lxml')
word_meaning_dict = dict()
# ----------------------basic-----------------------
basic = soup.find('div', attrs={'class': 'baav'})
basic_str = ''
if basic is not None:
basic_str += ' '.join(list(basic.stripped_strings)) + '\n'
basic = soup.find("div", id="phrsListTab")
if basic is not None:
result = basic.find('div', attrs={'class': 'trans-container'})
if result is not None:
basic_str += result.ul.get_text().strip('\n')
# if basic_str of word is '', we can make sure that this word or phrase does not exist.
if basic_str == '':
return None
word_meaning_dict['basic'] = basic_str
# -------------------词组短语---------------------
result = soup.find('div', id='transformToggle')
if result is not None:
phrase = result.find('div', id='wordGroup')
if phrase is not None:
phrase_str = ''
for i, s in enumerate(phrase.stripped_strings):
r = s.replace('\n', '')
if r.find(word) >= 0:
if i + 1 >= len(list(phrase.stripped_strings)):
break
phrase_str += r + ' ' + \
re.sub('\s*', '', list(phrase.stripped_strings)
[i + 1]) + '\n'
phrase_str = re.sub("\s+,", ",", phrase_str.strip())
phrase_str = re.sub("\s+◎", "◎", phrase_str.strip())
# print(phrase_str)
if len(phrase_str) != 0:
word_meaning_dict['phrase'] = phrase_str.strip('\n')
# -------------------同近义词---------------------
result = soup.find('div', id='transformToggle')
if result is not None:
synonyms = result.find('div', id='synonyms')
if synonyms is not None:
synonyms_str = ''
lst = []
for s in synonyms.stripped_strings:
lst.append(s)
i_next_index = 0
for i, s in enumerate(lst):
if i < i_next_index:
continue
if is_start_word_type(s):
synonyms_str += s + '\n'
j_next_index = 0
j_begin = i + 1
for j, d in enumerate(lst[j_begin:]):
if j < j_next_index:
continue
if is_start_word_type(d):
i_next_index = j_begin + j
synonyms_str += '\n'
break
if d == ',':
synonyms_str += ', '
else:
synonyms_str += d
word_meaning_dict['synonyms'] = synonyms_str.strip('\n')
# -------------------同根词---------------------
result = soup.find('div', id='transformToggle')
if result is not None:
rel_word_tab = result.find('div', id='relWordTab')
if rel_word_tab is not None:
rel_word_tab_str = ''
is_found = False
for i, s in enumerate(rel_word_tab.stripped_strings):
if s == u'词根:':
rel_word_tab_str += s + ' ' + \
list(rel_word_tab.stripped_strings)[i + 1] + '\n'
is_found = True
continue
if is_found:
is_found = False
continue
if s.find('.') >= 0:
rel_word_tab_str += s + '\n'
elif s.encode(
'utf-8').isalpha(): # without "encode('utf-8')", the Chinese symbol is recognized as alpha
rel_word_tab_str += s + ' '
else:
if len(rel_word_tab_str) > 0:
rel_word_tab_str += s + '\n'
else:
rel_word_tab_str += s
word_meaning_dict['rel_word_tab'] = rel_word_tab_str.strip('\n')
# -------------------词语辨析---------------------
result = soup.find('div', id='transformToggle')
if result is not None:
discriminate = result.find('div', id='discriminate')
if discriminate is not None:
discriminate_str = ''
is_found = False
for i, s in enumerate(discriminate.stripped_strings):
if is_found:
is_found = False
continue
if is_alpha_and_x(s, ','):
discriminate_str += '\n' + s + '\n'
attach = list(discriminate.stripped_strings)[i + 1] + '\n'
is_found = True
if whether_start_with_alpha(attach):
continue
else:
discriminate_str += attach
continue
if whether_only_alpha(s):
discriminate_str += s + ' '
if whether_has_non_alpha_symbol(s) and s != u'以上来源于' and s != u'网络':
discriminate_str += s + '\n'
word_meaning_dict['discriminate'] = discriminate_str.strip('\n')
# ---------------------collins---------------------
collins = soup.find('div', id="collinsResult")
if collins is not None:
text_list = []
for i, s in enumerate(collins.stripped_strings):
# tackle special formation problem
text_list.append(' '.join(s.split()))
line = ' '.join(text_list[3:])
collins_str = re.sub('例:', '\n例:', line)
collins_str = re.sub("\d+\.", "\n*", collins_str)
collins_str = collins_str[collins_str.find('*'):]
if len(collins_str.strip()) > 1:
word_meaning_dict['collins'] = collins_str.encode('utf-8')
word_meaning_dict['word'] = wrapped_word
word_meaning_dict['date'] = str(datetime.datetime.now())[:-7]
return word_meaning_dict
|
207a0777122bb1a0945037e18a916217f6c084cb
| 30,972 |
from operator import mod
def bahai_from_fixed(date):
"""Return Bahai date [major, cycle, year, month, day] corresponding
to fixed date, date."""
g_year = gregorian_year_from_fixed(date)
start = gregorian_year_from_fixed(BAHAI_EPOCH)
years = (g_year - start -
(1 if (date <= fixed_from_gregorian(
gregorian_date(g_year, MARCH, 20))) else 0))
major = 1 + quotient(years, 361)
cycle = 1 + quotient(mod(years, 361), 19)
year = 1 + mod(years, 19)
days = date - fixed_from_bahai(bahai_date(major, cycle, year, 1, 1))
# month
if (date >= fixed_from_bahai(bahai_date(major, cycle, year, 19, 1))):
month = 19
elif (date >= fixed_from_bahai(
bahai_date(major, cycle, year, AYYAM_I_HA, 1))):
month = AYYAM_I_HA
else:
month = 1 + quotient(days, 19)
day = date + 1 - fixed_from_bahai(bahai_date(major, cycle, year, month, 1))
return bahai_date(major, cycle, year, month, day)
|
bf31da3d961fc00abd764e06bae70094cb81af23
| 30,973 |
def create_or_update_record(tableName, record):
"""
Function to create or update a record in DynamoDB
Params:
tableName::str
The table name to get the record
record::dict
The object to store
Returns:
bool
If the record was inserted or not
"""
if not tableName or not record:
return False
if not {'username', 'index'}.issubset(record):
return False
try:
res = ddb.Table(tableName).get_item(
Key = {
"username": record['username'],
"index": record['index']
}
)
record = { **res['Item'], **record } if 'Item' in res else record
ddb.Table(tableName).put_item(
Item = record
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_or_update_record\n{}".format(e))
return None
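# Hedged usage sketch: 'users' is a placeholder table name; records must carry
# both key attributes, 'username' and 'index'.
ok = create_or_update_record('users', {'username': 'alice', 'index': 1, 'score': 42})
print(ok)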
|
4b1fbec40b404d93aefa2da728b5966917b2264a
| 30,974 |
import http
import json
def get_service_status(fledge_url):
"""
Return ping status from fledge.
Args:
fledge_url: The URL of Fledge.
Returns:
A json string that contains ping status.
"""
_connection = http.client.HTTPConnection(fledge_url)
_connection.request("GET", '/fledge/service')
r = _connection.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
return jdoc
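# Hedged example: 'localhost:8081' is a placeholder Fledge address; the parsed
# response is assumed to hold a 'services' list.
status = get_service_status('localhost:8081')
print([svc['name'] for svc in status.get('services', [])])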
|
5c0381acba98dc4ff060f36671ec6f15595e8afb
| 30,976 |
def png_to_jpeg(image_bytes: bytes, quality: int = 100) -> np.ndarray:
"""Converts PNG image (bytes or str) to JPEG (bytes)."""
runner = _get_runner()
decode_fn = lambda img: tf.image.decode_png(img, channels=3)
image = runner.run(decode_fn, image_bytes)
fn = lambda img: tf.image.encode_jpeg(img, format='rgb', quality=quality)
return runner.run(fn, image)
|
fa9cde555f2f2bba6375ecdb195a42ba6d753497
| 30,977 |
import urllib.parse
def get_object_metadata(sess, bucket_name, blob_name):
"""
get object metadata
"""
url = "https://www.googleapis.com/storage/v1/b/{}/o/{}".format(
        bucket_name, urllib.parse.quote(blob_name, safe="")
)
return sess.request(method="GET", url=url)
|
37fc44216e61e876c426b66a53054e59847bdbc6
| 30,978 |
import numpy as np
def j2_pert(s):
"""Returns the J2 acceleration for a given state.
Args:
s(1x6 numpy array): the state vector [rx,ry,rz,vx,vy,vz]
Returns:
1x3 numpy array: the J2 acceleration [ax,ay,az]
"""
r = np.linalg.norm(s[0:3])
K = -3*mu_Earth*J2*(Re**2)/2/r**5
comp = np.array([1,1,3])
comp = comp - 5*(s[2]/r)**2
comp = np.multiply(comp,s[0:3])
comp = np.multiply(K,comp)
return comp
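# Hedged example: the module-level constants are assumed to be defined in
# km-based units as below.
mu_Earth = 398600.4418  # Earth's gravitational parameter [km^3/s^2]
J2 = 1.08262668e-3      # Earth's second zonal harmonic [-]
Re = 6378.137           # Earth's equatorial radius [km]
s = np.array([7000.0, 0.0, 0.0, 0.0, 7.5, 1.0])  # [rx, ry, rz, vx, vy, vz]
print(j2_pert(s))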
|
fc9561521d55e4f6f300dd9a7cdfc52ba49c9473
| 30,979 |
import scipy
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
arrays assuming that the same position in these arrays map the
same "physical" object.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
if type(v)==list:
        vboot = []  # list of bootstrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot
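# Example: resample a synthetic measurement array with replacement and compare
# the original and bootstrapped means (note: scipy.random above relies on the
# legacy NumPy alias available in older SciPy releases).
import numpy as np
x = np.random.normal(loc=0.0, scale=1.0, size=1000)
xb = bootstrap(x)
print(x.mean(), xb.mean())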
|
7d6a194e68ad9833ef0cbef6de0b8ff6f5a7cd62
| 30,980 |
def html_table_header():
"""Return the HTML row with header cells used in all tables."""
markup = ("<tr>" +
"<th>Column name</th>" +
"<th>DataType</th>" +
"<th><abbr title='Primary Key'>PK</abbr></th>" +
"<th><abbr title='Foreign Key'>FK</abbr></th>" +
"<th><abbr title='Not Null'>NN</abbr></th>" +
"<th><abbr title='Unique'>UQ</abbr></th>" +
"<th><abbr title='Binary'>BIN</abbr></th>" +
"<th><abbr title='Unsigned'>UN</abbr></th>" +
"<th><abbr title='Zero Fill'>ZF</abbr></th>" +
"<th><abbr title='Auto Increment'>AI</abbr></th>" +
"<th>Default</th>" +
"<th>Comment</th>" +
"</tr>")
return markup
|
0fc65ca33cf23594dad007a3b0b16f1244ace62e
| 30,981 |
import glob
import numpy as np
def load_images(input_files):
    """
    Flattens each image in a folder into a 1D numpy array.
    Next, each 1D numpy array is stacked into a 2D numpy array,
    where each row is the flattened version of one image.
    """
imgfiles = glob.glob(input_files)
arr = []
for i, imgfile in enumerate(imgfiles):
arr.append(img2array(imgfile).reshape(-1,1))
return np.hstack(arr).T
|
06894e87f66a25ba195563a7a7ff961c365691db
| 30,982 |
def hpat_pandas_series_iloc(self):
"""
Pandas Series operators :attr:`pandas.Series.at`, :attr:`pandas.Series.iat`, :attr:`pandas.Series.iloc`, :attr:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_iloc2
Parameters
----------
series: :class:`pandas.Series`
input series
Returns
-------
:obj:`pandas.Series`
returns an object of :obj:`pandas.Series`
"""
_func_name = 'Operator at/iat/iloc/loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return self
return hpat_pandas_series_iloc_impl
|
56ec60440b540b2b15a38a730aad3f4cba4563f3
| 30,983 |
def checkLoggedIn(session):
"""
checks if any player
has logged in yet
"""
try:
return session["roll"] is not None
except KeyError:
session["roll"] = None
return False
|
436f51212abc9fe00abf11266cb90159a7f60bd4
| 30,984 |
import typing
import logging
def push_docker_image_buildah(external_docker_name, push_connection: typing.Optional[Connection]) -> str:
"""
Push the docker image using buildah
:param external_docker_name: external docker image target name, without host
:param push_connection: connection for pushing Docker images to
"""
if not push_connection:
return external_docker_name
remote_tag = f'{push_connection.spec.uri}/{external_docker_name}'
    logging.info('Tagging image for push')
tag_args = [
BUILDAH_BIN, 'tag',
external_docker_name,
remote_tag
]
run(*tag_args)
logging.info('Starting pushing of image')
push_args = [
BUILDAH_BIN, 'push',
*_extract_buildah_credentials(push_connection, remote_tag),
remote_tag
]
run(*push_args, sensitive=True)
return remote_tag
|
950fd26d65de99f035ea5d39e3f708b0173298f6
| 30,985 |
def _match(patspec, tree):
"""Test if a tree matches the given pattern statement; return the matches
>>> _match(b'f(_)', parse(b'f()'))
>>> _match(b'f(_)', parse(b'f(1)'))
[('func', ('symbol', 'f'), ('symbol', '1')), ('symbol', '1')]
>>> _match(b'f(_)', parse(b'f(1, 2)'))
"""
pattern = _cachedtree(patspec)
return parser.matchtree(pattern, tree, ('symbol', '_'),
{'keyvalue', 'list'})
|
1b0fd2ef103bc2e9a6e1ee3df855b736ff24c162
| 30,986 |
from typing import List
def list_methods(client: Client) -> List[str]:
"""Lists the methods which are available on the server.
Args:
client: A client instance.
Returns:
List of method names.
"""
return client._client.ListMethods()
|
a005113b9142f6de929b6c7accc4b0150041742c
| 30,987 |
import re
def MakeLocal(start, end, location, name):
"""
Create a local variable
@param start: start of address range for the local variable
@param end: end of address range for the local variable
@param location: the variable location in the "[bp+xx]" form where xx is
a number. The location can also be specified as a
register name.
@param name: name of the local variable
@return: 1-ok, 0-failure
@note: For the stack variables the end address is ignored.
           If there is no function at 'start' then this function
           will fail.
"""
func = idaapi.get_func(start)
if not func:
return 0
# Find out if location is in the [bp+xx] form
    r = re.compile(r"\[([a-z]+)([-+][0-9a-fx]+)", re.IGNORECASE)
m = r.match(location)
if m:
# Location in the form of [bp+xx]
register = idaapi.str2reg(m.group(1))
offset = int(m.group(2), 0)
frame = idaapi.get_frame(func)
if register == -1 or not frame:
return 0
offset += func.frsize
member = idaapi.get_member(frame, offset)
if member:
# Member already exists, rename it
if idaapi.set_member_name(frame, offset, name):
return 1
else:
return 0
else:
# No member at the offset, create a new one
if idaapi.add_struc_member(frame,
name,
offset,
idaapi.byteflag(),
None, 1) == 0:
return 1
else:
return 0
else:
# Location as simple register name
return idaapi.add_regvar(func, start, end, location, name, None)
|
1950cd443bb4a4618638943256ed602412133ca7
| 30,988 |
def accuracy(y, t):
"""
y: baredl.Tensor or np.ndarray (n, c)
n: number of samples
c: number of classes
Assuming it contains probabilities for each class
e.g. [[0.1,0.3,0.6], [0.1,0.8,0.1], ...]
t: baredl.Tensor or np.array (n,)
n: number of samples
Assuming it contains the true class label as index
e.g. [2,1,1,0,2,0,...]
"""
y, t = as_tensor(y), as_tensor(t)
pred = y.data.argmax(axis=1).reshape(t.shape)
result = (pred == t.data)
acc = result.mean()
return Tensor(as_array(acc))
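# Hedged example: plain NumPy inputs are accepted; as_tensor/Tensor/as_array
# are assumed to come from the surrounding baredl module.
import numpy as np
y = np.array([[0.1, 0.3, 0.6], [0.1, 0.8, 0.1]])
t = np.array([2, 0])
print(accuracy(y, t))  # accuracy = 0.5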
|
7f810190ee0a3c36ab3f8f51a2d7869309614a75
| 30,989 |
from typing import Type
def get_config() -> Type[Config]:
"""Get the global spines configuration
Returns
-------
Config
The global configuration settings for the current spines
project.
"""
if _GLOBAL_CONFIG is None:
load_config()
return _GLOBAL_CONFIG.copy()
|
628337a2669237df265aafb7ad07746e075690c7
| 30,990 |
from typing import Optional
def prompt_yes_or_no(
question: str,
yes_text: str = 'Yes',
no_text: str = 'No',
has_to_match_case: bool = False,
enter_empty_confirms: bool = True,
default_is_yes: bool = False,
deselected_prefix: str = ' ',
selected_prefix: str = '\033[31m>\033[0m ',
char_prompt: bool = True) -> Optional[bool]:
"""Prompt the user to input yes or no.
Args:
question (str): The prompt asking the user to input.
yes_text (str, optional): The text corresponding to 'yes'.
no_text (str, optional): The text corresponding to 'no'.
has_to_match_case (bool, optional): Does the case have to match.
enter_empty_confirms (bool, optional): Does enter on empty string work.
default_is_yes (bool, optional): Is yes selected by default (no).
deselected_prefix (str, optional): Prefix if something is deselected.
selected_prefix (str, optional): Prefix if something is selected (> )
char_prompt (bool, optional): Add a [Y/N] to the prompt.
Returns:
Optional[bool]: The bool what has been selected.
"""
is_yes = default_is_yes
is_selected = enter_empty_confirms
current_message = ''
yn_prompt = f' ({yes_text[0]}/{no_text[0]}) ' if char_prompt else ': '
print()
while True:
yes = is_yes and is_selected
no = not is_yes and is_selected
print('\033[K'
f'{selected_prefix if yes else deselected_prefix}{yes_text}')
print('\033[K'
f'{selected_prefix if no else deselected_prefix}{no_text}')
print('\033[3A\r\033[K'
f'{question}{yn_prompt}{current_message}', end='', flush=True)
keypress = readchar.readkey()
if keypress in DefaultKeys.down or keypress in DefaultKeys.up:
is_yes = not is_yes
is_selected = True
current_message = yes_text if is_yes else no_text
elif keypress in DefaultKeys.delete:
if current_message:
current_message = current_message[:-1]
elif keypress in DefaultKeys.interrupt:
raise KeyboardInterrupt
elif keypress in DefaultKeys.confirm:
if is_selected:
break
elif keypress in '\t':
if is_selected:
current_message = yes_text if is_yes else no_text
else:
current_message += keypress
match_yes = yes_text
match_no = no_text
match_text = current_message
if not has_to_match_case:
match_yes = match_yes.upper()
match_no = match_no.upper()
match_text = match_text.upper()
if match_no.startswith(match_text):
is_selected = True
is_yes = False
elif match_yes.startswith(match_text):
is_selected = True
is_yes = True
else:
is_selected = False
print()
print('\033[K\n\033[K\n\033[K\n\033[3A')
return is_selected and is_yes
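# Hedged usage sketch: ask for confirmation before a destructive action;
# Ctrl-C propagates as KeyboardInterrupt.
if prompt_yes_or_no('Overwrite existing output?', default_is_yes=False):
    print('Overwriting...')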
|
754659d5ab28715002d1a5d47a80e87a64d3b79f
| 30,991 |
import oci.object_storage
import mysqlsh
import re
import threading
def delete_bucket_object(name=None, **kwargs):
"""Deletes an object store bucket objects
Args:
name (str): The name of the object, can include * to match multiple
objects
**kwargs: Additional options
Keyword Args:
bucket_name (str): The name of the bucket.
compartment_id (str): OCID of the parent compartment.
config (object): An OCI config object or None.
interactive (bool): If set to false, function returns true on success
Returns:
None or True
"""
bucket_name = kwargs.get('bucket_name')
compartment_id = kwargs.get('compartment_id')
config = kwargs.get('config')
interactive = kwargs.get('interactive', True)
# Get the active config and compartment
try:
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
bucket_name = configuration.get_current_bucket_name(
bucket_name=bucket_name, config=config)
bucket = get_bucket(
bucket_name=bucket_name, compartment_id=compartment_id,
config=config)
if bucket is None:
if interactive:
print("Operation Cancelled.\n")
return
# Initialize the Object Store client
os_client = core.get_oci_object_storage_client(config=config)
# Get Object Store namespace
namespace_name = get_object_store_namespace(config)
# If the user specified * as name, delete all
if name and (name == '*' or '*' in name):
# Get object list
objects = oci.pagination.list_call_get_all_results(
os_client.list_objects,
namespace_name=namespace_name,
bucket_name=bucket.name,
limit=1000).data.objects
# Filter list
if name != '*':
name = name.lower()
            # Use pattern matching when the name contains wildcards
if '*' in name:
name_pattern = '^' + name.replace('*', '.+')
objects = [obj for obj in objects
if re.search(name_pattern, obj.name.lower())]
else:
objects = [obj for obj in objects
if name == obj.name.lower()]
# Get object count
obj_count = len(objects)
if obj_count == 0:
print("No objects to delete in this bucket.")
return
# Prompt the user for confirmation
if interactive:
prompt = mysqlsh.globals.shell.prompt(
f"Are you sure you want to delete {obj_count} object"
f"{'s' if obj_count > 1 else ''} from {bucket.name} "
f"[yes/NO]: ",
{'defaultValue': 'no'}).strip().lower()
if prompt != "yes":
print("Deletion aborted.\n")
return
# Delete all objects
print(f"Deleting {obj_count} "
f"object{'s' if obj_count > 1 else ''}.")
thread_count = NTHREAD if obj_count > NTHREAD else obj_count
ths = [threading.Thread(
target=delete_file_from_list_from_bucket,
args=(i, os_client, objects, namespace_name, bucket.name,
thread_count))
for i in range(thread_count)]
for th in ths:
th.daemon = True
th.start()
for th in ths:
th.join()
if interactive:
print(f"Bucket object{'s' if '*' in name else ''} "
f"deleted successfully.")
elif name:
os_client.delete_object(
namespace_name=namespace_name, bucket_name=bucket.name,
object_name=name)
print(f"Bucket object '{name}' deleted successfully.")
elif interactive:
# Get object list
bucket_objects = oci.pagination.list_call_get_all_results(
os_client.list_objects,
namespace_name=namespace_name,
bucket_name=bucket.name,
limit=1000).data.objects
print(format_bucket_objects_listing(bucket_objects=bucket_objects))
obj_summary = core.prompt_for_list_item(
item_list=bucket_objects,
prompt_caption="Please enter the index or name of an object: ",
item_name_property="name")
if obj_summary is None:
print("Operation cancelled.")
return
name = obj_summary.name
os_client.delete_object(
namespace_name=namespace_name, bucket_name=bucket.name,
object_name=name)
print(f"Bucket object '{name}' deleted successfully.")
else:
print('No Object name given.')
if not interactive:
return True
except oci.exceptions.ServiceError as e:
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except Exception as e:
print(f'ERROR: {e}')
return
|
95e15c98f5c11cb55b6036305ab8fc0b440fcfa0
| 30,992 |
def _create_input_dict(function_graph,
func_arg_placeholders,
initial_value=None):
"""Create a mapping from graph tensor names to function tensor names."""
if initial_value is None:
input_dict = {}
else:
input_dict = dict(initial_value)
for op in function_graph.get_operations():
if _is_in_placeholders(op, func_arg_placeholders):
input_dict[op.name] = op.name
else:
op_def = _get_op_def(op)
attrs = _get_node_def(op).attr
o = 0
for arg_def in op_def.output_arg:
if arg_def.number_attr:
num = attrs[arg_def.number_attr].i
elif arg_def.type_list_attr:
num = len(attrs[arg_def.type_list_attr].list.type)
else:
num = 1
for i in range(num):
result = "%s:%s:%d" % (op.name, arg_def.name, i)
input_dict[op.values()[o].name] = result
if o == 0:
input_dict[op.name] = result
o += 1
return input_dict
|
8db84c7ba4cb13c13bf5ef54fe1ecff79b5765fc
| 30,993 |
def process(document, rtype=None, api=None):
""" Extracts spelling-corrected tokens in specified format from given texterra-annotated text. """
corrected_tokens = []
if annotationName in document['annotations']:
if rtype == 'annotation':
for token in document['annotations'][annotationName]:
corrected_tokens.append((token['start'], token['end'], token['value']))
elif rtype == 'token':
for token in document['annotations'][annotationName]:
corrected_tokens.append((document['text'][token['start']: token['end']], token['value']))
else: # rtype == 'full'
for token in document['annotations'][annotationName]:
corrected_tokens.append((token['start'], token['end'], document['text'][token['start']: token['end']],
token['value']))
return corrected_tokens
|
bbd2de604bcf9c280bd2fac8e5e0d0975a905bc9
| 30,994 |
from pathlib import Path
import hashlib
import base64
def get_hashed_path(path: Path, *, algorithm=hashlib.sha256, stretching_count: int = 256) -> Path:
    """
    Obfuscate a filename.
    Recommended when you want to prevent anyone from guessing file information from the file name.
    """
p = Path(path)
return p.with_name(base64.urlsafe_b64encode(generate_hash_by_path(p, algorithm=algorithm, stretching_count=stretching_count)).decode("ascii"))
|
1981477e975465a23f73931d9496a71c8cf26e34
| 30,995 |
def quantile(values, q):
"""
Returns q-th quantile.
"""
values = sorted(values)
size = len(values)
idx = int(round(size * q)) - 1
    if idx < 0:
raise ValueError("Sample size too small: %s" % len(values))
return values[idx]
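# Example: empirical median and 90th percentile of a small sample.
data = [1, 3, 2, 7, 5, 4, 6, 9, 8, 10]
print(quantile(data, 0.5), quantile(data, 0.9))  # 5 9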
|
614f6d9dbdf586b802d6380e2880df3659faa0c2
| 30,996 |
def parse_child_text(parent, selector, parser, index=0):
"""Parse the text content of the child element of parent as specified by the given CSS selector
    If index is specified, parse the text content of the matching child element
    at the specified zero-based index; otherwise, parse the text content of the
    first matching child element.
"""
text = get_child_text(parent, selector, index)
if text is not None:
try:
return parser(text)
except ValueError:
return None
|
b10ec463dd572b4e6e256302d8db6e599638b1be
| 30,997 |