content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
from flask import jsonify

def call_status():
    """
    Get the bathing status for a user.
    Before bathing: 0
    During bathing: 1
    After bathing: 2
    :return: JSON response with the status dictionary.
    """
user_id = "testuser"
result_dict = check_status(user_id)
return jsonify(result_dict)
|
9ef37eeb309c64cb7b4759323b4cb9569b910c65
| 31,674 |
def dice_loss(pred, target, smooth=1.):
"""Dice loss
"""
pred = pred.contiguous()
target = target.contiguous()
intersection = (pred * target).sum(dim=2).sum(dim=2)
loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth)))
return loss.mean()
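# A minimal usage sketch for dice_loss above, assuming PyTorch tensors shaped
# (batch, channels, H, W) with predicted probabilities in [0, 1]:
import torch

pred = torch.rand(2, 1, 4, 4)                     # predicted probabilities
target = (torch.rand(2, 1, 4, 4) > 0.5).float()   # binary ground-truth masks
print(dice_loss(pred, target))                    # scalar tensor, lower is better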
|
5879769ac379395e35f9accda9d917094aa07301
| 31,675 |
def _get_scope_contacts_by_object_id(connection, object_id,
object_type, scope_contact_type):
"""Gets scope contacts by object id.
Args:
connection: Database connection.
object_id: An integer value of scope object id.
object_type: A string value of scope object type.
scope_contact_type: A string value of scope contact.
Returns:
Set of emails by scope contacts.
"""
scope_contacts = connection.execute(
text("""
SELECT
`people`.`email`
FROM `access_control_people`
INNER JOIN `people`
ON `access_control_people`.`person_id` = `people`.`id`
INNER JOIN `access_control_list`
ON `access_control_people`.`ac_list_id` = `access_control_list`.`id`
INNER JOIN `access_control_roles`
ON `access_control_list`.`ac_role_id` = `access_control_roles`.`id`
WHERE
`access_control_roles`.`name` = :scope_contact_type AND
`access_control_list`.`object_id` = :object_id AND
`access_control_list`.`object_type` = :object_type
"""),
scope_contact_type=scope_contact_type,
object_id=object_id,
object_type=object_type).fetchall()
return {scope_contact.email for scope_contact in scope_contacts}
|
c7c1fe05cb48fb5e28499f995a8c606c9e421d6e
| 31,676 |
import re
def remove_mentions(text):
"""Remove @-mentions from the text"""
return re.sub('@\w+', '', text)
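# For example:
print(remove_mentions("thanks @alice, see you and @bob_99 later"))
# -> 'thanks , see you and  later'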
|
5cbdd40a602f24f8274369e92f9159cbb2f6a230
| 31,677 |
from botocore.exceptions import ClientError
from fastapi import HTTPException

def put_thread(req_thread: ReqThreadPut):
    """Put the thread for a video to DynamoDB."""
    update_input = create_update_item_input(req_thread)
    try:
        res = client.update_item(**update_input)
        return res
    except ClientError as err:
        err_message = err.response["Error"]["Message"]
        raise HTTPException(status_code=404, detail=err_message) from err
    except BaseException as err:
        raise HTTPException(status_code=404, detail=str(err)) from err
|
733370296c022a985b49193a1b528e0df8271624
| 31,678 |
from flask import Flask, jsonify
from admin.admin_blueprint import admin_blueprint
from questionnaire.questionnaire_blueprint import questionnaire_blueprint
from user.user_blueprint import user_blueprint
def create_app(config_name: str):
"""Application factory
Args:
config_name (str): the application config name to determine which env to run on
Returns:
The Flask application object
"""
app = Flask(__name__)
app.config.from_object(config[config_name])
db.init_app(app)
ma.init_app(app)
flask_bcrypt.init_app(app)
jwt.init_app(app)
admin.init_app(app)
redis_client.init_app(app)
app.register_blueprint(admin_blueprint)
app.register_blueprint(user_blueprint)
app.register_blueprint(questionnaire_blueprint)
@app.errorhandler(ValidationError)
def handle_exception(error):
"""Error handler called when a ValidationError Exception is raised"""
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.errorhandler(500)
def internal_server_error(error):
return {"message": "Internal Server Error", "status": "error"}, 500
@jwt.invalid_token_loader
def invalid_token_loader(expired_token):
return {
"status": "error",
"message": "Your session is invalid. Kindly login again",
}, 401
@jwt.unauthorized_loader
def no_auth_header_handler(error):
return {
"status": "error",
"message": "Authentication type should be a bearer type.",
}, 401
@jwt.expired_token_loader
def my_expired_token_handler(error):
return {
"status": "error",
"message": "Your session has expired. Kindly login again.",
}, 401
return app
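# A minimal entry-point sketch for the factory above; the "development" config
# key is a hypothetical example and must exist in the local `config` mapping:
app = create_app("development")

if __name__ == "__main__":
    app.run(debug=True)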
|
13e171d780f87ffad0802703ab72483669bc3453
| 31,679 |
import numpy
def get_clean_num(num):
"""
Get the closest clean number match to num with bases 1, 2, 5.
Args:
num: the number
Returns:
the closest number
"""
if num == 0: return 0
sign = num > 0 and 1 or -1
exp = get_exp(num)
nums = numpy.array((1, 2, 5, 10))*(10**exp)
return sign*nums[numpy.argmin(numpy.abs(nums - abs(num)))]
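# A small sketch of how get_clean_num behaves, assuming the external get_exp
# helper returns the decade exponent floor(log10(|num|)):
import math

def get_exp(num):
    """Assumed helper: decade exponent of the magnitude of num."""
    return int(math.floor(math.log10(abs(num))))

print(get_clean_num(7.3))   # -> 5   (candidates 1, 2, 5, 10)
print(get_clean_num(42))    # -> 50  (candidates 10, 20, 50, 100)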
|
09b4f5893e8d33a16217a2292ca75d7730e5d90e
| 31,680 |
from functools import wraps

def with_config_error(func):
"""Add config error context."""
@wraps(func)
def wrapper(*args, **kwargs):
with config_key_error():
return func(*args, **kwargs)
return wrapper
|
667ab25648c17d087fbc54fd9d284c98c40b4b0b
| 31,681 |
def _flat(l):
"""Flattens a list.
"""
f = []
for x in l:
f += x
return f
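# For example:
print(_flat([[1, 2], [3], [4, 5]]))  # -> [1, 2, 3, 4, 5]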
|
9b2e432d79f08840d417601ff950ff9fa28073ef
| 31,682 |
def axesDict(T_axes):
"""Check connectivity based on Interval Vectors."""
intervalList = [
T_axes[0],
T_axes[1],
T_axes[2],
(12 - T_axes[0]),
(12 - T_axes[1]),
(12 - T_axes[2])]
return intervalList
|
6b1e8c59d12a3c2c548b95f3bcd8d7a3de4ef931
| 31,683 |
def _settings_to_component(
name: str,
settings: configuration.ProjectSettings,
options: amos.Catalog) -> bases.Projectbases.Component:
"""[summary]
Args:
name (str): [description]
settings (configuration.ProjectSettings): [description]
options (amos.Catalog): [description]
Returns:
bases.Projectbases.Component: [description]
"""
design = settings.designs.get(name, None)
kind = settings.kinds.get(name, None)
lookups = _get_lookups(name = name, design = design, kind = kind)
base = _get_base(lookups = lookups, options = options)
parameters = amos.get_annotations(item = base)
attributes, initialization = _parse_initialization(
name = name,
settings = settings,
parameters = parameters)
initialization['parameters'] = _get_runtime(
lookups = lookups,
settings = settings)
component = base(name = name, **initialization)
for key, value in attributes.items():
setattr(component, key, value)
return component
|
7e709a27b275587ce742c08d10efbd7b0aa171ce
| 31,684 |
import numpy as np
from tqdm import tqdm
def iss(data, gamma21, gamma32, KDTree_radius, NMS_radius, max_num=100):
"""
    Description: intrinsic shape signatures (ISS) keypoint detection based on CUDA FRNN and CUDA non-maximum suppression
    Args:
        data: numpy array of point cloud, shape (num_points, 3)
        gamma21: upper threshold on the eigenvalue ratio lambda_2 / lambda_1
        gamma32: upper threshold on the eigenvalue ratio lambda_3 / lambda_2
        KDTree_radius: radiusNN range
        NMS_radius: non-maximum suppression radius
        max_num: max number of keypoints
    Return:
        is_keypoint->list[bool]: mask indicating whether each point is a keypoint or not
"""
# import ipdb;ipdb.set_trace()
print(f'iss algo started...{data.shape[0]} of points prepared')
#transfer data dtype to float32 before processing it on GPU
temp_data = data.astype(np.float32)
# create mask to indicate whether point i and point j are within range r
adj = gpu_frnn(temp_data,KDTree_radius)
adj = adj.reshape((data.shape[0],data.shape[0]))
# initialize empty list to store neighbor points
r_list = []
l3_list = []
is_keypoint = np.full(data.shape[0],False)
# number of neighbor points for each point
weights = np.sum(adj,axis=1)
for i in tqdm(range(data.shape[0])):
indices = np.argwhere(adj[i,:]>0)[:,0]
weight = 1 / weights[indices]
neighbors = data[indices]
# store neighbor indices
r_list.append(indices)
# (pj - pi) in matrix format
P = neighbors - data[i]
# Compute Weighted covariance matrix Cov(pi)
Cov = weight * P.T @ P / np.sum(weight)
# Compute eigenvalues of Cov(pi) as lambda_1, lambda_2, lambda_3 in descending order
e_values, e_vectors = np.linalg.eig(Cov)
l1, l2, l3 = e_values[np.argsort(-e_values)]
# Store point's lambda_3 value
l3_list.append(l3)
# Initialize keypoint proposal with the criterion: l2 / l1 < g21 and l3 / l2 < g32
if l2 / l1 < gamma21 and l3 / l2 < gamma32:
is_keypoint[i] = True
print("performing nms based on cuda")
is_keypoint = is_keypoint.astype(np.int32)
is_keypoint_idx = np.argwhere(is_keypoint==1)[:,0].astype(np.int32)
l3_array = np.asarray(l3_list).astype(np.float32)
gpu_nms(is_keypoint,
is_keypoint_idx,
adj,
l3_array)
is_keypoint = is_keypoint.astype(bool)
"""
# For each point (pi) in the point cloud
for i in tqdm(range(len(is_keypoint))):
# Initialize an empty list to store keypoints' indices and lambda_3 values
keypoint_list = []
# If the point itself is a keypoint
if is_keypoint[i]:
# Store its index and lambda_3 value
keypoint_list.append([i, l3_list[i]])
# for each neighbor
for j in r_list[i]:
# If the neighbor is itself, skip
if j == i:
continue
# If the neighbor is a keypoint
if is_keypoint[j]:
# Store its index and lambda_3 value
keypoint_list.append([j, l3_list[j]])
# If there is no keypoints in keypoint_list, skip
if len(keypoint_list) == 0:
continue
# Convert keypoint_list to numpy ndarray
keypoint_list = np.asarray(keypoint_list)
# Sort keypoint_list using lambda_3 value in descending order
keypoint_list = keypoint_list[keypoint_list[:, -1].argsort()[::-1]]
# Only the keypoint with the largest lambda_3 value is considered as the final keypoint
# Get all the indices to be suppressed except for the first row
filter_ind = keypoint_list[1:, 0].astype(int)
# Set keypoint status of point at those indices to False
is_keypoint[filter_ind] = False
"""
return is_keypoint
|
bf2b84ed179314334a6e7a88f84f6f86468006dd
| 31,685 |
import requests
from flask import request, current_app, jsonify
from flask_jwt_extended import create_access_token, create_refresh_token
def login_wechat():
"""
This api logins a user through wechat app.
"""
code = request.json.get('code', None)
wechat_code2session_url = 'https://api.weixin.qq.com/sns/jscode2session'
payload = {
'appid': current_app.config['WECHAT_APPID'],
'secret': current_app.config['WECHAT_APP_SECRET'],
'js_code': code,
'grant_type': 'authorization_code'
}
r = requests.get(wechat_code2session_url, params=payload)
if "errcode" in str(r.content):
return jsonify(message="Something wrong with the code"), 201
openid = r.json().get('openid', None)
session_key = r.json().get('session_key', None)
# check if the openid already exists in the DB.
user = query_existing_openid_user(openid)
if not user:
user = User()
user.openid = openid
user.session_key = session_key
db.session.add(user)
db.session.commit()
access_token = create_access_token(identity=user.to_dict())
refresh_token = create_refresh_token(identity=user.to_dict())
ret = {
'access_token': access_token,
'refresh_token': refresh_token
}
add_token_to_db(access_token, current_app.config['JWT_IDENTITY_CLAIM'])
add_token_to_db(refresh_token, current_app.config['JWT_IDENTITY_CLAIM'])
return jsonify(ret), 201
|
985eddbcb39ade8aebad3d9d179a0b174df50280
| 31,688 |
def is_no_entitled(request):
"""Check condition for needing to entitled user."""
no_entitled_list = ["source-status"]
no_auth = any(no_auth_path in request.path for no_auth_path in no_entitled_list)
return no_auth
|
feee0962568b20c685fd85096ce00dbb91b91fe5
| 31,689 |
def _make_selector_from_key_distribution_options(
options) -> reverb_types.SelectorType:
"""Returns a Selector from its KeyDistributionOptions description."""
one_of = options.WhichOneof('distribution')
if one_of == 'fifo':
return item_selectors.Fifo()
if one_of == 'uniform':
return item_selectors.Uniform()
if one_of == 'prioritized':
return item_selectors.Prioritized(options.prioritized.priority_exponent)
if one_of == 'heap':
if options.heap.min_heap:
return item_selectors.MinHeap()
return item_selectors.MaxHeap()
if one_of == 'lifo':
return item_selectors.Lifo()
raise ValueError(f'Unknown distribution field: {one_of}')
|
3b932328f7b3e226e3dada54c8f1ca08e32167af
| 31,690 |
import base64
import numpy as np
def json_numpy_obj_hook(dct):
"""
Decodes a previously encoded numpy ndarray
with proper shape and dtype
from: http://stackoverflow.com/a/27948073/5768001
:param dct: (dict) json encoded ndarray
:return: (ndarray) if input was an encoded ndarray
"""
if isinstance(dct, dict) and '__ndarray__' in dct:
data = base64.b64decode(dct['__ndarray__'])
return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])
return dct
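# A round-trip sketch matching the field names this hook expects; the encoder
# side below is an assumption mirroring the linked Stack Overflow recipe:
import base64
import json
import numpy as np

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
encoded = json.dumps({
    '__ndarray__': base64.b64encode(arr.tobytes()).decode('ascii'),
    'dtype': str(arr.dtype),
    'shape': arr.shape,
})
decoded = json.loads(encoded, object_hook=json_numpy_obj_hook)
assert (decoded == arr).all()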
|
50aab4855d63206534981bce95ec0219dec9724e
| 31,691 |
from typing import Optional
import copy
def redirect_edge(state: SDFGState,
edge: graph.MultiConnectorEdge[Memlet],
new_src: Optional[nodes.Node] = None,
new_dst: Optional[nodes.Node] = None,
new_src_conn: Optional[str] = None,
new_dst_conn: Optional[str] = None,
new_data: Optional[str] = None,
new_memlet: Optional[Memlet] = None) -> graph.MultiConnectorEdge[Memlet]:
"""
Redirects an edge in a state. Choose which elements to override by setting
the keyword arguments.
:param state: The SDFG state in which the edge resides.
:param edge: The edge to redirect.
:param new_src: If provided, redirects the source of the new edge.
:param new_dst: If provided, redirects the destination of the new edge.
:param new_src_conn: If provided, renames the source connector of the edge.
:param new_dst_conn: If provided, renames the destination connector of the
edge.
:param new_data: If provided, changes the data on the memlet of the edge,
and the entire associated memlet tree.
:param new_memlet: If provided, changes only the memlet of the new edge.
:return: The new, redirected edge.
:note: ``new_data`` and ``new_memlet`` cannot be used at the same time.
"""
if new_data is not None and new_memlet is not None:
raise ValueError('new_data and new_memlet cannot both be given.')
mtree = None
if new_data is not None:
mtree = state.memlet_tree(edge)
state.remove_edge(edge)
if new_data is not None:
memlet = copy.deepcopy(edge.data)
memlet.data = new_data
# Rename on full memlet tree
for e in mtree:
e.data.data = new_data
else:
memlet = new_memlet or edge.data
    new_edge = state.add_edge(new_src or edge.src, new_src_conn or edge.src_conn,
                              new_dst or edge.dst, new_dst_conn or edge.dst_conn, memlet)
return new_edge
|
368ff8dace5b781d05f7e75fe9d57cae648aee9d
| 31,692 |
def svn_fs_lock(*args):
"""
svn_fs_lock(svn_fs_t fs, char path, char token, char comment, svn_boolean_t is_dav_comment,
apr_time_t expiration_date,
svn_revnum_t current_rev, svn_boolean_t steal_lock,
apr_pool_t pool) -> svn_error_t
"""
return _fs.svn_fs_lock(*args)
|
f711b280a24f5d3c595a81013d1dc5275a997a60
| 31,693 |
def maybe_double_last(hand):
"""
:param hand: list - cards in hand.
    :return: list - hand with the last card's value doubled if it is a Jack (11).
"""
if hand[-1] == 11:
hand[-1] = 22
return hand
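# For example:
print(maybe_double_last([5, 9, 11]))  # -> [5, 9, 22]
print(maybe_double_last([11, 9, 5]))  # -> [11, 9, 5] (last card is not a Jack)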
|
378546e8dd650a67a5d9d9eed490969fd085bfb1
| 31,694 |
def length(draw, min_value=0, max_value=None):
"""Generates the length for Blast+6 file format.
Arguments:
- `min_value`: Minimum value of length to generate.
- `max_value`: Maximum value of length to generate.
"""
return draw(integers(min_value=min_value, max_value=max_value))
|
e3ac6b5d9bcc6380e475785047438b3be8a81288
| 31,695 |
def ppw(text):
"""PPW -- Percentage of Polysyllabic Words."""
ppw = None
polysyllabic_words_num = 0
words_num, words = word_counter(text, 'en')
for word in words:
if syllable_counter(word) >= 3:
polysyllabic_words_num += 1
if words_num != 0:
ppw = polysyllabic_words_num / words_num
return ppw
|
b8c8c92a4947404a7166e63458e7d4eb2f9a00fc
| 31,696 |
def is_running_in_azure_ml(aml_run: Run = RUN_CONTEXT) -> bool:
"""
Returns True if the given run is inside of an AzureML machine, or False if it is on a machine outside AzureML.
    When called without arguments, this function returns True if the present code is running in AzureML.
Note that in runs with "compute_target='local'" this function will also return True. Such runs execute outside
of AzureML, but are able to log all their metrics, etc to an AzureML run.
:param aml_run: The run to check. If omitted, use the default run in RUN_CONTEXT
:return: True if the given run is inside of an AzureML machine, or False if it is a machine outside AzureML.
"""
return hasattr(aml_run, "experiment")
|
3d2d6bcf95c34def5fff9c8bf1053c785b075895
| 31,697 |
def format_test_case(test_case):
"""Format test case from `-[TestClass TestMethod]` to `TestClass_TestMethod`.
Args:
test_case: (basestring) Test case id in format `-[TestClass TestMethod]` or
`[TestClass/TestMethod]`
Returns:
(str) Test case id in format TestClass/TestMethod.
"""
test_case = _sanitize_str(test_case)
    test = (test_case.replace('[', '').replace(']', '')
            .replace('-', '').replace(' ', '/'))
return test
|
f2d4530fbcc9d07409bfc7a88225653a7f550185
| 31,698 |
def _truncate_seed(seed):
"""
Truncate the seed with MAXINT32.
Args:
seed (int): The seed to be truncated.
Returns:
        Integer. The seed modulo MAXINT32.
"""
return seed % _MAXINT32
|
2caf14236ec1697d6ab7144604e0f2be05d525d2
| 31,699 |
import torch
def get_default_device():
""" Using GPU if available or CPU """
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
|
ff65f896938b9e53b78d3a6578883129bc886204
| 31,700 |
def build_stateless_broadcaster():
"""Just tff.federated_broadcast with empty state, to use as a default."""
return tff.utils.StatefulBroadcastFn(
initialize_fn=lambda: (),
next_fn=lambda state, value: ( # pylint: disable=g-long-lambda
state, tff.federated_broadcast(value)))
|
6d78f3f452551cb2eb7640bb09ee7541c5a752bd
| 31,701 |
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testLoad"
, "testIntCounterSet"
, "testFloatCounterSet"
, "testIntCounter"
, "testIntCounterPublish"
, "testIntCounter2"
, "testIntCounter3"
, "testIntCounter2Reset"
, "testIntCounter3Reset"
, "testIntCounterDec"
, "testIntCounterDec2"
, "testIntCounterDec3"
, "testIntCounterDec2Reset"
, "testIntCounterDec3Reset"
, "testFloatCounter"
, "testFloatCounterDec"
, "testIntCounterReset"
],
"zzcomponent":
[ "testComponents"
],
"zzintegration":
[ "testIntegration"
],
"zzpending":
[ "testPending"
]
}
return TestUtils.getTestSuite(TestCounters, testdict, select=select)
|
1816286c04b8b7a2e994522622a5f567869cad48
| 31,702 |
from typing import Union
from pathlib import Path
def find_mo(search_paths=None) -> Union[Path, None]:
"""
Args:
        search_paths: paths where ModelOptimizer may be found. If None, only the default paths are used.
Returns:
path to the ModelOptimizer or None if it wasn't found.
"""
default_mo_path = ('intel', 'openvino', 'deployment_tools', 'model_optimizer')
default_paths = [Path.home().joinpath(*default_mo_path), Path('/opt').joinpath(*default_mo_path)]
executable = 'mo.py'
for path in search_paths or default_paths:
path = Path(path)
if not path.is_dir():
continue
mo = path / executable
if not mo.is_file():
continue
return mo
return None
|
4657e15649692415dd10f2daa6527cade351d8fc
| 31,703 |
def autoaugment(dataset_path, repeat_num=1, batch_size=32, target="Ascend"):
"""
    Create an ImageFolderDataset pipeline with AutoAugment policies applied.
"""
if target == "Ascend":
device_num, rank_id = _get_rank_info()
else:
init("nccl")
rank_id = get_rank()
device_num = get_group_size()
if device_num == 1:
ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
else:
ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
num_shards=device_num, shard_id=rank_id)
image_size = 224
mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
trans = [
c_vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
]
post_trans = [
c_vision.RandomHorizontalFlip(prob=0.5),
c_vision.Normalize(mean=mean, std=std),
c_vision.HWC2CHW()
]
dataset = ds.map(operations=trans, input_columns="image")
dataset = dataset.map(operations=c_vision.RandomSelectSubpolicy(imagenet_policy), input_columns=["image"])
dataset = dataset.map(operations=post_trans, input_columns="image")
type_cast_op = c_transforms.TypeCast(mstype.int32)
dataset = dataset.map(operations=type_cast_op, input_columns="label")
# apply the batch operation
dataset = dataset.batch(batch_size, drop_remainder=True)
# apply the repeat operation
dataset = dataset.repeat(repeat_num)
return dataset
|
596eb26fe376298327900a240d07c89f6914f76d
| 31,704 |
def dropout_mask(x, sz, dropout):
""" Applies a dropout mask whose size is determined by passed argument 'sz'.
Args:
x (torch.Tensor): A torch Variable object
sz (tuple(int, int, int)): The expected size of the new tensor
dropout (float): The dropout fraction to apply
This method uses the bernoulli distribution to decide which activations to keep.
    Additionally, the sampled activations are rescaled using the factor 1/(1 - dropout).
In the example given below, one can see that approximately .8 fraction of the
returned tensors are zero. Rescaling with the factor 1/(1 - 0.8) returns a tensor
with 5's in the unit places.
The official link to the pytorch bernoulli function is here:
http://pytorch.org/docs/master/torch.html#torch.bernoulli
Examples:
>>> a_Var = torch.autograd.Variable(torch.Tensor(2, 3, 4).uniform_(0, 1), requires_grad=False)
>>> a_Var
Variable containing:
(0 ,.,.) =
0.6890 0.5412 0.4303 0.8918
0.3871 0.7944 0.0791 0.5979
0.4575 0.7036 0.6186 0.7217
(1 ,.,.) =
0.8354 0.1690 0.1734 0.8099
0.6002 0.2602 0.7907 0.4446
0.5877 0.7464 0.4257 0.3386
[torch.FloatTensor of size 2x3x4]
>>> a_mask = dropout_mask(a_Var.data, (1,a_Var.size(1),a_Var.size(2)), dropout=0.8)
>>> a_mask
(0 ,.,.) =
0 5 0 0
0 0 0 5
5 0 5 0
[torch.FloatTensor of size 1x3x4]
"""
return x.new_empty(*sz).bernoulli_(1-dropout)/(1-dropout)
|
ae6aebad62fa97014227f4ac68bca68f2eafe95f
| 31,705 |
def two_body_mc_force_en_jit(bond_array_1, c1, etypes1,
bond_array_2, c2, etypes2,
d1, sig, ls, r_cut, cutoff_func,
nspec, spec_mask, bond_mask):
"""Multicomponent two-body force/energy kernel accelerated with
Numba's njit decorator."""
kern = 0
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
sig2 = sig * sig
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
for m in range(bond_array_1.shape[0]):
ri = bond_array_1[m, 0]
ci = bond_array_1[m, d1]
fi, fdi = cutoff_func(r_cut, ri, ci)
e1 = etypes1[m]
be1 = spec_mask[e1]
btype = bond_mask[bc1n + be1]
tls1 = ls1[btype]
tls2 = ls2[btype]
tsig2 = sig2[btype]
for n in range(bond_array_2.shape[0]):
e2 = etypes2[n]
# check if bonds agree
if (c1 == c2 and e1 == e2) or (c1 == e2 and c2 == e1):
rj = bond_array_2[n, 0]
fj, _ = cutoff_func(r_cut, rj, 0)
r11 = ri - rj
B = r11 * ci
D = r11 * r11
kern += force_energy_helper(B, D, fi, fj, fdi,
tls1, tls2,
tsig2)
return kern
|
c027a874b1662d0b9c954302cc3d0b26f77f9a21
| 31,706 |
def customize_hrm_programme(**attr):
"""
Customize hrm_programme controller
"""
# Organisation needs to be an NS/Branch
ns_only(current.s3db.hrm_programme.organisation_id,
required=False,
branches=False,
)
return attr
|
3ef74f74e09b9c4498700b9f0d20245829d48c42
| 31,707 |
def freq_count(line, wrddict, win, ctxcounter, wrdcounter):
"""
Counts words and context words of a string.
line: The sentence as a string.
wrddict: Word index mapping.
win: Word context window size.
ctxcounter: Context Counter.
wrdcounter: Word Counter.
"""
if not (isinstance(line, str)):
raise TypeError("NOT A STRING")
words = line.split()
cnt = 0
for word in words:
if word in wrddict:
wrdcounter[word] += 1
ctx = 1
while ctx <= win:
if (cnt + ctx) < len(words):
ctxword = words[cnt + ctx]
if ctxword in wrddict:
ctxcounter[ctxword] += 1
ctxcounter[word] += 1
ctx += 1
else:
break
cnt += 1
return wrdcounter, ctxcounter
|
87ebe01058f8958f5ffe06e0944068c10b26ae44
| 31,708 |
def _onehot_encoding_unk(x, allowable_set):
"""Maps inputs not in the allowable set to the last element."""
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
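# For example:
print(_onehot_encoding_unk('O', ['C', 'N', 'O', 'other']))   # [False, False, True, False]
print(_onehot_encoding_unk('Si', ['C', 'N', 'O', 'other']))  # [False, False, False, True]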
|
3b386c6640bd5e37a6ab2276e090c8ea56eed5ce
| 31,709 |
import re
import numpy as np
from astropy.io import fits
def getChironSpec(obnm, normalized=True, slit='slit', normmech='flat', returnFlat=False):
"""PURPOSE: To retrieve a CHIRON spectrum given the observation
name (obnm)."""
#extract the date (yymmdd) from the obnm:
date = re.search(r'chi(\d{6})', obnm).group(1)
#extract the core of the obnm. This will make the code more
#robust, allowing people to enter the obnm with or without
#the 'a' or 'chi' or 'fits', etc.
#output obnm is in the format 'chiyymmdd.####'
obnm = re.search(r'(chi\d{6}\.\d{4})', obnm).group(1)
scihdu = fits.open('/tous/mir7/fitspec/'+date+'/a'+obnm+'.fits')
scidata = scihdu[0].data
if normalized:
#generate the flat filename:
flatfn = '/tous/mir7/flats/chi'+date+'.'+slit+'flat.fits'
flathdu = fits.open(flatfn)
flatdata = flathdu[0].data
#create a 2D array to store the output (wav/spec, #orders, px/order):
normspecout = np.zeros([flatdata.shape[1], flatdata.shape[2], 2])
normflatout = np.zeros([flatdata.shape[1], flatdata.shape[2], 2])
#cycle through orders
for ord in range(flatdata.shape[1]):
#now retrieve the normalized polynomial fit to the master flat:
normfit = flatdata[2, 61 - ord, :]/np.max(flatdata[2, 61 - ord, :])
normflatout[ord, :, 1] = flatdata[1, 61 - ord, :]/np.max(flatdata[1, 61 - ord, :])
normflatout[ord, :, 0] = scidata[ord, :, 0]
#superimpose stellar spec
normspec_init = scidata[ord, :, 1]/np.max(scidata[ord, :, 1])
normspec = normspec_init/normfit[::-1]
#determine the number of maximum values to
#use in the normalization. In this case we
#will use the top 0.5%, which corresponds
#to 16 elements for CHIRON:
            nummax = int(np.ceil(0.005 * len(normspec)))
#now sort normspec and find the median of the
#`nummax` highest values in the old normspec
mnhghval = np.median(np.sort(normspec)[-nummax:-1])
#now renormalize by that value:
normspecout[ord, :, 1] = normspec / mnhghval
normspecout[ord, :, 0] = scidata[ord, :, 0]
if returnFlat:
return normflatout
else:
return normspecout
else:
return scidata
|
a29090e0838f93feeb8ad758beb1bd563b78178e
| 31,710 |
import posixpath
import webapp2
def api_routes(api_classes, base_path='/_ah/api', regex='[^/]+'):
"""Creates webapp2 routes for the given Endpoints v1 services.
Args:
api_classes: A list of protorpc.remote.Service classes to create routes for.
base_path: The base path under which all service paths should exist. If
unspecified, defaults to /_ah/api.
regex: Regular expression to allow in path parameters.
Returns:
A list of webapp2.Routes.
"""
routes = []
# Add routes for each class.
for api_class in api_classes:
api_base_path = '%s/%s/%s' % (
base_path, api_class.api_info.name, api_class.api_info.version)
templates = set()
# Add routes for each method of each class.
for _, method in sorted(api_class.all_remote_methods().items()):
info = method.method_info
method_path = info.get_path(api_class.api_info)
method_path = method_path.replace('{', '<').replace('}', ':%s>' % regex)
t = posixpath.join(api_base_path, method_path)
http_method = info.http_method.upper() or 'POST'
handler = path_handler(api_class, method, api_base_path)
routes.append(webapp2.Route(t, handler, methods=[http_method]))
templates.add(t)
# Add routes for HTTP OPTIONS (to add CORS headers) for each method.
for t in sorted(templates):
routes.append(webapp2.Route(t, CorsHandler, methods=['OPTIONS']))
# Add generic routes.
routes.extend([
directory_service_route(api_classes, base_path),
discovery_service_route(api_classes, base_path),
explorer_proxy_route(base_path),
explorer_redirect_route(base_path),
])
return routes
|
47cd1da8300f010e1c3ef7ee8bca21d7139a40ad
| 31,711 |
def WrapReportText(text):
"""Helper to allow report string wrapping (e.g. wrap and indent).
Actually invokes textwrap.fill() which returns a string instead of a list.
We always double-indent our wrapped blocks.
Args:
text: String text to be wrapped.
Returns:
String of wrapped and indented text.
"""
return wrapper.fill(text)
|
4818f0d777c8165fd7a762033379741781bf48af
| 31,712 |
import math
def point_in_wave(point_x, frequency, amplitude, offset_x, offset_y):
"""Returns the specified point x in the wave of specified parameters."""
return (math.sin((math.pi * point_x)/frequency + offset_x) * amplitude) + offset_y
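# A small sampling sketch; the parameter values below are arbitrary:
ys = [point_in_wave(x, frequency=10, amplitude=2.0, offset_x=0.0, offset_y=1.0)
      for x in range(0, 21, 5)]
# ys oscillates around the vertical offset 1.0 with amplitude 2.0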
|
5a91c9204819492bb3bd42f0d4c9231d39e404d8
| 31,713 |
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Note : Although tile may be used for broadcasting, it is strongly
recommended to use numpy's broadcasting operations and functions.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
broadcast_to : Broadcast an array to a new shape
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
# Fixes the problem that the function does not make a copy if A is a
# numpy array and the repetitions are 1 in all dimensions
return _nx.array(A, copy=True, subok=True, ndmin=d)
else:
# Note that no copy of zero-sized arrays is made. However since they
# have no data there is no risk of an inadvertent overwrite.
c = _nx.array(A, copy=False, subok=True, ndmin=d)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
shape_out = tuple(s*t for s, t in zip(c.shape, tup))
n = c.size
if n > 0:
for dim_in, nrep in zip(c.shape, tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
n //= dim_in
return c.reshape(shape_out)
|
446247517aaaaecff377571a14384a2bbd0c949f
| 31,714 |
import requests
def get_super_user_token(endpoint):
"""
Gets the initialized super user token.
This is one time, cant get the token again once initialized.
Args:
endpoint (str): Quay Endpoint url
Returns:
str: Super user token
"""
data = (
f'{{"username": "{constants.QUAY_SUPERUSER}", "password": "{constants.QUAY_PW}", '
f'"email": "[email protected]", "access_token": true}}'
)
r = requests.post(
f"{endpoint}/{constants.QUAY_USER_INIT}",
headers={"content-type": "application/json"},
data=data,
verify=False,
)
return r.json()["access_token"]
|
bf5782fe3cc563b70d7fbd925b4f06e9d29fba1a
| 31,715 |
import torch
from torch.autograd import Variable
def to_input_variable(sequences, vocab, cuda=False, training=True):
"""
given a list of sequences,
return a tensor of shape (max_sent_len, batch_size)
"""
word_ids = word2id(sequences, vocab)
sents_t, masks = input_transpose(word_ids, vocab['<pad>'])
if type(sents_t[0][0]) != list:
with torch.no_grad():
sents_var = Variable(torch.LongTensor(sents_t), requires_grad=False)
if cuda:
sents_var = sents_var.cpu()
else:
sents_var = sents_t
return sents_var
|
3dea99cdf94a06ce3f1b1be02a49f0d8396cb140
| 31,716 |
def map_to_docs(solr_response):
"""
Response mapper that only returns the list of result documents.
"""
return solr_response['response']['docs']
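# For example, with a typical Solr JSON response:
solr_response = {"response": {"numFound": 2, "docs": [{"id": "1"}, {"id": "2"}]}}
print(map_to_docs(solr_response))  # -> [{'id': '1'}, {'id': '2'}]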
|
2661b9075c05a91c241342151d713702973b9c12
| 31,718 |
def get_config_type(service_name):
"""
get the config type based on service_name
"""
if service_name == "HDFS":
type = "hdfs-site"
elif service_name == "HDFS":
type = "core-site"
elif service_name == "MAPREDUCE":
type = "mapred-site"
elif service_name == "HBASE":
type = "hbase-site"
elif service_name == "OOZIE":
type = "oozie-site"
elif service_name == "HIVE":
type = "hive-site"
elif service_name == "WEBHCAT":
type = "webhcat-site"
else:
type = "global"
return type
|
96793f932334eb8e4a5460767a80ee6a989cee22
| 31,719 |
from sklearn import linear_model

def regression_model(X, y, alpha=.5):
    """
    trains a simple ridge regression model
    Args:
        X: feature matrix of shape (n_samples, n_features)
        y: target values of shape (n_samples,)
        alpha: regularization strength for the ridge penalty
    Returns: model
    """
reg = linear_model.Ridge(alpha=alpha, fit_intercept=True)
# reg = linear_model.Lasso(alpha = alpha,fit_intercept = True)
reg.fit(X, y)
return reg
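# A quick sketch of fitting the ridge model on toy data:
import numpy as np

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([1.0, 3.0, 5.0, 7.0])
model = regression_model(X, y, alpha=0.1)
print(model.predict([[4.0]]))  # close to 9, shrunk slightly by the ridge penalty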
|
f15741ac95a8738e031d6eb77f9d5bed76f4958d
| 31,720 |
def array2d_export(f, u2d, fmt=None, **kwargs):
"""
export helper for Util2d instances
Parameters
----------
f : str
filename or existing export instance type (NetCdf only for now)
u2d : Util2d instance
fmt : str
output format flag. 'vtk' will export to vtk
**kwargs : keyword arguments
min_valid : minimum valid value
max_valid : maximum valid value
modelgrid : flopy.discretization.Grid
        model grid instance which will supersede the flopy.model.modelgrid
if fmt is set to 'vtk', parameters of vtk.export_array
"""
assert isinstance(u2d, DataInterface), (
"util2d_helper only helps " "instances that support " "DataInterface"
)
assert len(u2d.array.shape) == 2, "util2d_helper only supports 2D arrays"
min_valid = kwargs.get("min_valid", -1.0e9)
max_valid = kwargs.get("max_valid", 1.0e9)
modelgrid = u2d.model.modelgrid
if "modelgrid" in kwargs:
modelgrid = kwargs.pop("modelgrid")
if isinstance(f, str) and f.lower().endswith(".nc"):
f = NetCdf(f, u2d.model, **kwargs)
if isinstance(f, str) and f.lower().endswith(".shp"):
name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True)
shapefile_utils.write_grid_shapefile(f, modelgrid, {name: u2d.array})
return
elif isinstance(f, str) and f.lower().endswith(".asc"):
export_array(modelgrid, f, u2d.array, **kwargs)
return
elif isinstance(f, NetCdf) or isinstance(f, dict):
# try to mask the array - assume layer 1 ibound is a good mask
# f.log("getting 2D array for {0}".format(u2d.name))
array = u2d.array
# f.log("getting 2D array for {0}".format(u2d.name))
with np.errstate(invalid="ignore"):
            if array.dtype not in [int, np.int32, np.int64]:
if (
modelgrid.idomain is not None
and "ibound" not in u2d.name.lower()
and "idomain" not in u2d.name.lower()
):
array[modelgrid.idomain[0, :, :] == 0] = np.NaN
array[array <= min_valid] = np.NaN
array[array >= max_valid] = np.NaN
mx, mn = np.nanmax(array), np.nanmin(array)
else:
mx, mn = np.nanmax(array), np.nanmin(array)
array[array <= min_valid] = netcdf.FILLVALUE
array[array >= max_valid] = netcdf.FILLVALUE
if (
modelgrid.idomain is not None
and "ibound" not in u2d.name.lower()
and "idomain" not in u2d.name.lower()
and "icbund" not in u2d.name.lower()
):
array[modelgrid.idomain[0, :, :] == 0] = netcdf.FILLVALUE
var_name = u2d.name
if isinstance(f, dict):
f[var_name] = array
return f
array[np.isnan(array)] = f.fillvalue
units = "unitless"
if var_name in NC_UNITS_FORMAT:
units = NC_UNITS_FORMAT[var_name].format(
f.grid_units, f.time_units
)
precision_str = NC_PRECISION_TYPE[u2d.dtype]
if var_name in NC_LONG_NAMES:
attribs = {"long_name": NC_LONG_NAMES[var_name]}
else:
attribs = {"long_name": var_name}
attribs["coordinates"] = "latitude longitude"
attribs["units"] = units
attribs["min"] = mn
attribs["max"] = mx
if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
raise Exception("error processing {0}: all NaNs".format(var_name))
try:
var = f.create_variable(
var_name,
attribs,
precision_str=precision_str,
dimensions=f.dimension_names[1:],
)
except Exception as e:
estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
f.logger.warn(estr)
raise Exception(estr)
try:
var[:] = array
except Exception as e:
estr = "error setting array to variable {0}:\n{1}".format(
var_name, str(e)
)
f.logger.warn(estr)
raise Exception(estr)
return f
elif fmt == "vtk":
# call vtk array export to folder
name = kwargs.get("name", u2d.name)
nanval = kwargs.get("nanval", -1e20)
smooth = kwargs.get("smooth", False)
point_scalars = kwargs.get("point_scalars", False)
vtk_grid_type = kwargs.get("vtk_grid_type", "auto")
true2d = kwargs.get("true2d", False)
binary = kwargs.get("binary", False)
vtk.export_array(
u2d.model,
u2d.array,
f,
name,
nanval=nanval,
smooth=smooth,
point_scalars=point_scalars,
array2d=True,
vtk_grid_type=vtk_grid_type,
true2d=true2d,
binary=binary,
)
else:
raise NotImplementedError("unrecognized export argument:{0}".format(f))
|
59c7962d24a688eabebe069500f49d01aef80c28
| 31,721 |
def not_daily(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Daily frequencies.
"""
return request.param
|
e30563d0b6ee62cd995908045ddc356ca58b5796
| 31,722 |
from pandas.core.reshape.concat import concat
import itertools
from typing import List
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
) -> "DataFrame":
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
dtypes_to_encode = ["object", "category"]
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(include=dtypes_to_encode)
elif not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = (
f"Length of '{name}' ({len(item)}) did not match the "
"length of the columns being encoded "
f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
check_len(prefix, "prefix")
check_len(prefix_sep, "prefix_sep")
if isinstance(prefix, str):
prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, str):
prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
with_dummies: List[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
prefix=pre,
prefix_sep=sep,
dummy_na=dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(
data,
prefix,
prefix_sep,
dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
return result
|
a3be8d5a3f56d438d1182254749b2967d7bf48fd
| 31,723 |
import io
def get_call_xlsx(call, submitted=False, proposals=None):
"""Return the content of an XLSX file for all proposals in a call.
Optionally only the submitted ones.
Optionally for the given list proposals.
"""
if proposals is None:
title = f"Proposals in {call['identifier']}"
proposals = get_call_proposals(call, submitted=submitted)
else:
title = f"Selected proposals in {call['identifier']}"
score_fields = get_review_score_fields(call, proposals)
rank_fields, rank_errors = get_review_rank_fields_errors(call, proposals)
output = io.BytesIO()
wb = xlsxwriter.Workbook(output, {'in_memory': True})
head_text_format = wb.add_format({'bold': True,
'text_wrap': True,
'bg_color': '#9ECA7F',
'font_size': 15,
'align': 'center',
'border': 1})
normal_text_format = wb.add_format({'font_size': 14,
'align': 'left',
'valign': 'vcenter'})
ws = wb.add_worksheet(title[:31])
ws.freeze_panes(1, 1)
ws.set_row(0, 60, head_text_format)
ws.set_column(1, 1, 40, normal_text_format)
ws.set_column(2, 2, 10, normal_text_format)
ws.set_column(3, 4, 20, normal_text_format)
nrow = 0
row = ['Proposal', 'Proposal title']
row.extend(['Submitted', 'Submitter', 'Email', 'Affiliation'])
ncol = len(row)
for field in call['proposal']:
row.append(field['title'] or field['identifier'].capitalize())
if field['type'] in (constants.LINE, constants.EMAIL):
ws.set_column(ncol, ncol, 40, normal_text_format)
elif field['type'] == constants.TEXT:
ws.set_column(ncol, ncol, 60, normal_text_format)
ncol += 1
allow_view_reviews = anubis.call.allow_view_reviews(call)
if allow_view_reviews:
for rf in rank_fields.values():
row.append(f"Reviews {rf['title']}: ranking factor")
row.append(f"Reviews {rf['title']}: stdev")
if len(score_fields) >= 2:
row.append("Reviews all scores: mean of means")
row.append("Reviews all scores: stdev of means")
for rf in score_fields.values():
row.append(f"Reviews {rf['title']}: N")
row.append(f"Reviews {rf['title']}: mean")
row.append(f"Reviews {rf['title']}: stdev")
allow_view_decisions = anubis.call.allow_view_decisions(call)
if allow_view_decisions:
row.append('Decision')
row.append('Decision status')
for field in call['decision']:
if not field.get('banner'): continue
title = field['title'] or field['identifier'].capitalize()
row.append(title)
ws.write_row(nrow, 0, row)
nrow += 1
for proposal in proposals:
ncol = 0
ws.write_url(nrow, ncol,
flask.url_for('proposal.display',
pid=proposal['identifier'],
_external=True),
string=proposal['identifier'])
ncol += 1
ws.write_string(nrow, ncol, proposal.get('title') or '')
ncol += 1
ws.write_string(nrow, ncol, proposal.get('submitted') and 'yes' or 'no')
ncol += 1
user = anubis.user.get_user(username=proposal['user'])
ws.write_string(nrow, ncol, utils.get_fullname(user))
ncol += 1
ws.write_string(nrow, ncol, user.get('email') or '')
ncol += 1
ws.write_string(nrow, ncol, user.get('affiliation') or '')
ncol += 1
for field in call['proposal']:
value = proposal['values'].get(field['identifier'])
if value is None:
ws.write_string(nrow, ncol, '')
elif field['type'] == constants.TEXT:
ws.write_string(nrow, ncol, value)
elif field['type'] == constants.DOCUMENT:
ws.write_url(nrow, ncol,
flask.url_for('proposal.document',
pid=proposal['identifier'],
fid=field['identifier'],
_external=True),
string='Download')
elif field['type'] == constants.SELECT:
if isinstance(value, list): # Multiselect
ws.write(nrow, ncol, '\n'.join(value))
else:
ws.write(nrow, ncol, value)
else:
ws.write(nrow, ncol, value)
ncol += 1
if allow_view_reviews:
for id in rank_fields.keys():
value = proposal['ranking'][id]['factor']
if value is None:
ws.write_string(nrow, ncol, '')
else:
ws.write_number(nrow, ncol, value)
ncol += 1
value = proposal['ranking'][id]['stdev']
if value is None:
ws.write_string(nrow, ncol, '')
else:
ws.write_number(nrow, ncol, value)
ncol += 1
if len(score_fields) >= 2:
value = proposal['scores']['__mean__']
if value is None:
ws.write_string(nrow, ncol, '')
else:
ws.write_number(nrow, ncol, value)
ncol += 1
value = proposal['scores']['__stdev__']
if value is None:
ws.write_string(nrow, ncol, '')
else:
ws.write_number(nrow, ncol, value)
ncol += 1
for id in score_fields:
ws.write_number(nrow, ncol, proposal['scores'][id]['n'])
ncol += 1
value = proposal['scores'][id]['mean']
if value is None:
ws.write_string(nrow, ncol, '')
else:
ws.write_number(nrow, ncol, value)
ncol += 1
value = proposal['scores'][id]['stdev']
if value is None:
ws.write_string(nrow, ncol, '')
else:
ws.write_number(nrow, ncol, value)
ncol += 1
if allow_view_decisions:
decision = anubis.decision.get_decision(proposal.get('decision')) or {}
if decision:
verdict = decision.get('verdict')
if verdict:
ws.write(nrow, ncol, 'Accepted')
elif verdict is None:
ws.write(nrow, ncol, 'Undecided')
else:
ws.write(nrow, ncol, 'Declined')
else:
ws.write(nrow, ncol, '-')
ncol += 1
if decision.get('finalized'):
ws.write(nrow, ncol, 'Finalized')
else:
ws.write(nrow, ncol, '-')
ncol += 1
for field in call['decision']:
if not field.get('banner'): continue
if decision.get('finalized'):
value = decision['values'].get(field['identifier'])
ws.write(nrow, ncol, value)
else:
ws.write_string(nrow, ncol, '')
ncol += 1
nrow += 1
wb.close()
return output.getvalue()
|
2077b0b38262d4dff83ebaccc808da3a4e728992
| 31,724 |
def to_array(t):
"""
Converts a taco tensor to a NumPy array.
This always copies the tensor. To avoid the copy for dense tensors, see the notes section.
Parameters
-----------
t: tensor
A taco tensor to convert to a NumPy array.
Notes
-------
Dense tensors export python's buffer interface. As a result, they can be converted to NumPy arrays using
``np.array(tensor, copy=False)`` . Attempting to do this for sparse tensors throws an error. Note that as a result
of exporting the buffer interface dense tensors can also be converted to eigen or any other library supporting this
    interface.
Also it is very important to note that if requesting a NumPy view of data owned by taco, taco will mark the array as
read only meaning the user cannot write to that data without using the taco reference. This is needed to avoid
raising issues with taco's delayed execution mechanism.
Examples
----------
We first look at a simple use of to_array
>>> import pytaco as pt
>>> t = pt.tensor([2, 2], [pt.dense, pt.compressed])
>>> t.insert([0, 0], 10)
>>> t.to_array()[0, 0]
10.0
One could choose to use np.array if a copy is not needed
>>> import pytaco as pt
>>> import numpy as np
>>> t = pt.tensor([2, 2], pt.dense)
>>> t.insert([0, 0], 10)
>>> a = np.array(t, copy=False)
>>> a
array([[10., 0.],
[ 0., 0.]], dtype=float32)
>>> t.insert([0, 0], 100) # Note that insert increments instead of setting!
>>> t.to_array()[0, 0]
110.0
Returns
---------
arr: numpy.array
A NumPy array containing a copy of the data in the tensor object t.
"""
return np.array(t.to_dense(), copy=True)
|
29df47e3535c610954e8f1bae828af80ad6ae9f7
| 31,726 |
def perform_operation(operator_sign: str, num1: float, num2: float) -> float:
"""
Perform the operation on the two numbers.
Parameters
----------
operator_sign : str
Plus, minus, multiplication or division.
num1 : float
Number 1.
num2 : float
Number 2.
Returns
-------
float
Result of the operation.
Raises
------
ValueError
Raise when the operator is not supported.
"""
operation_object = OPERATIONS.get(operator_sign, None)
if operation_object is not None:
return operation_object(num1, num2)
raise ValueError(f"Not supported operator: {operator_sign}")
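# A usage sketch; OPERATIONS is external to this snippet, so the mapping below
# is an assumed example built on the standard operator module:
import operator

OPERATIONS = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}

print(perform_operation("+", 2.0, 3.0))  # 5.0
# perform_operation("^", 2.0, 3.0)       # would raise ValueError: Not supported operator: ^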
|
e515a103a47b32e2a7197e10a1ad7a395433e7d9
| 31,727 |
def whitespace_tokenize(subtokens):
"""An implementation of BERT's whitespace tokenizer that preserves space."""
return split_subtokens_on(
subtokens, lambda char: char.isspace(), are_good=True)
|
09e451a80b8df66ce0a4401bf3ff681dc9c1b1da
| 31,728 |
def moon_phase(
ephemerides: skyfield.jpllib.SpiceKernel, time: skyfield.timelib.Timescale
) -> float:
"""Calculate the phase angle of the Moon.
This will be 0 degrees at new moon, 90 degrees at first quarter, 180
degrees at full moon, etc.
"""
sun = ephemerides[Planets.SUN.value]
earth = ephemerides[Planets.EARTH.value]
moon = ephemerides[Planets.MOON.value]
apparent_sun = earth.at(time).observe(sun).apparent()
_, solar_longitude, _ = apparent_sun.frame_latlon(ecliptic_frame)
apparent_moon = earth.at(time).observe(moon).apparent()
_, lunar_longitude, _ = apparent_moon.frame_latlon(ecliptic_frame)
return (lunar_longitude.degrees - solar_longitude.degrees) % 360
|
8a24d3166816ba42f150866f89fbbfa98f418ed1
| 31,729 |
def fib_functools(n):
"""Return nth fibonacci number starting at fib(1) == 0 using functools
decorator."""
# incorrect fib, but the tests expect it
if n == 0: return 1
if n in [1, 2]:
return n-1
return fib(n - 1) + fib(n - 2)
|
335908076cff922e9a27dbb2a50e88901fd7e637
| 31,730 |
def sync_filter(func, *iterables):
"""
Filter multiple iterable at once, selecting values at index i
such that func(iterables[0][i], iterables[1][i], ...) is True
"""
return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(
iterables
)
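# For example:
xs = (1, 2, 3, 4)
ys = ('a', 'b', 'c', 'd')
print(sync_filter(lambda x, y: x % 2 == 0, xs, ys))  # ((2, 4), ('b', 'd'))
print(sync_filter(lambda x, y: x > 9, xs, ys))       # ((), ())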
|
7a2ab5e6356dadff0fe78d3f2bb0da584e0ff41b
| 31,731 |
def visit_hostname(hostname):
"""
Have a chance to visit a hostname before actually using it.
:param hostname: The original hostname.
:returns: The hostname with the necessary changes.
"""
for processor in [hostname_ssl_migration, hostname_tld_migration, ]:
hostname = processor(hostname)
return hostname
|
dd8d57a88bd5951d9748c362112954e4549cdd6c
| 31,732 |
import asyncio as aio

async def wait_for_other(client):
"""Await other tasks except the current one."""
base_tasks = aio.all_tasks()
async def wait_for_other():
ignore = list(base_tasks) + [aio.current_task()]
while len(tasks := [t for t in aio.all_tasks() if t not in ignore]):
await aio.gather(*tasks, return_exceptions=True)
return wait_for_other
|
ab276973862fd89ba935d70fb2fb72dbd6fe7cfa
| 31,733 |
import nibabel as nib
import numpy as np

def register_series(series, ref, pipeline):
"""Register a series to a reference image.
Parameters
----------
series : Nifti1Image object
The data is 4D with the last dimension separating different 3D volumes
ref : Nifti1Image or integer or iterable
Returns
-------
transformed_list, affine_list
"""
if isinstance(ref, nib.Nifti1Image):
static = ref
static_data = static.get_fdata()
s_aff = static.affine
moving = series
moving_data = moving.get_fdata()
m_aff = moving.affine
elif isinstance(ref, int) or np.iterable(ref):
data = series.get_fdata()
idxer = np.zeros(data.shape[-1]).astype(bool)
idxer[ref] = True
static_data = data[..., idxer]
if len(static_data.shape) > 3:
static_data = np.mean(static_data, -1)
moving_data = data[..., ~idxer]
m_aff = s_aff = series.affine
affine_list = []
transformed_list = []
for ii in range(moving_data.shape[-1]):
this_moving = moving_data[..., ii]
transformed, affine = affine_registration(this_moving, static_data,
moving_affine=m_aff,
static_affine=s_aff,
pipeline=pipeline)
transformed_list.append(transformed)
affine_list.append(affine)
return transformed_list, affine_list
|
3afefd4cf1f33cba1d04bd49437e33cfcfbdb578
| 31,735 |
import random
def normal27(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
"""
for source and destination id generation
"""
"""
for type of banking work,label of fraud and type of fraud
"""
idvariz=random.choice(zz2)
idgirande=random.choice(aa2)
first.append("transfer")
second.append(idvariz)
third.append(idgirande)
sixth.append("0")
seventh.append("none")
"""
for amount of money generation
"""
numberofmoney=random.randrange(50000,money2)
forth.append(numberofmoney)
"""
for date and time generation randomly between two dates
"""
final=randomDate(startt,endt, random.random())
fifth.append(final)
return (first,second,third,forth,fifth,sixth,seventh)
|
469c0a88a77b083d666e938d4d13199987918e1d
| 31,736 |
def linear_diophantine(a, b, c):
"""Solve ax + by = c, where x, y are integers
1. solution exists iff c % gcd(a,b) = 0
2. all solutions have form (x0 + b'k, y0 - a'k)
Returns
-------
None if no solutions exists
(x0, y0, a', b') otherwise
"""
# d = pa + qb
p, q, d = extended_euclidian(a, b)
if d == 0 or c % d != 0:
return None
# ax + by = c <=> a'x + b'y = c'
a, b, c = a // d, b // d, c // d
return p * c, q * c, a, b
|
6b5fdebe7508249978ea97f0a40330d2ed2243b8
| 31,737 |
from operator import and_
def count_per_packet_loss(organization_id, asset_type=None, asset_status=None,
data_collector_ids=None,
gateway_ids=None, device_ids=None,
min_signal_strength=None, max_signal_strength=None,
min_packet_loss=None, max_packet_loss=None):
""" Count assets (devices+gateways) grouped by specific ranges of packet loss values
Parameters:
- asset_type: for filtering, count only this type of asset ("device", "gateway" or "none" for no assets).
- asset_status: for filtering, count only assets with this status ("connected" or "disconnected").
- data_collector_ids[]: for filtering, count only the assets connected to ANY one of these data collectors.
- gateway_ids[]: for filtering, count only the assets connected to ANY one of these gateways.
- device_ids[]: for filtering, list only the assets related to ANY of these devices
- min_signal_strength: for filtering, count only the assets with signal strength not lower than this value (dBm)
- max_signal_strength: for filtering, count only the assets with signal strength not higher than this value (dBm)
- min_packet_loss: for filtering, count only the assets with packet loss not lower than this value (percentage)
- max_packet_loss: for filtering, count only the assets with packet loss not higher than this value (percentage)
Returns:
- List of dicts, where each dict has the packet loss range id and name, and the count of assets
"""
# The packet loss ranges are defined as [L,R) = [range_limits[i], range_limits[i+1]) for every 0 <= i <= 9
range_limits = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 101]
range_names = ['[0,10)', '[10,20)', '[20,30)', '[30,40)', '[40,50)', '[50,60)', '[60,70)', '[70,80)', '[80,90)', '[90,100]']
packets_up = build_count_subquery(CounterType.PACKETS_UP)
packets_down = build_count_subquery(CounterType.PACKETS_DOWN)
packets_lost = build_count_subquery(CounterType.PACKETS_LOST)
dev_query = db.session.query()
gtw_query = db.session.query()
for i in range(0, len(range_names)):
name = range_names[i]
L = range_limits[i]
R = range_limits[i+1]
dev_query = dev_query.add_column(func.count(distinct(Device.id)).filter(and_(
packets_up.c.count + packets_down.c.count + packets_lost.c.count > 0,
L <= 100*packets_lost.c.count/(packets_up.c.count + packets_down.c.count + packets_lost.c.count),
R > 100*packets_lost.c.count/(packets_up.c.count + packets_down.c.count + packets_lost.c.count),
)).label(name))
# Gateways are not considered because they don't have the loss value
gtw_query = gtw_query.add_column(expression.literal_column("0").label(name))
dev_query = dev_query.\
select_from(Device).\
filter(Device.organization_id==organization_id).\
filter(Device.pending_first_connection==False).\
join(packets_up, Device.id == packets_up.c.device_id).\
join(packets_down, Device.id == packets_down.c.device_id).\
join(packets_lost, Device.id == packets_lost.c.device_id)
queries = add_filters(
dev_query = dev_query,
gtw_query = gtw_query,
asset_type = asset_type,
asset_status = asset_status,
data_collector_ids=data_collector_ids,
gateway_ids = gateway_ids,
device_ids = device_ids,
min_signal_strength = min_signal_strength,
max_signal_strength = max_signal_strength,
min_packet_loss = min_packet_loss,
max_packet_loss = max_packet_loss)
dev_query = queries[0]
gtw_query = queries[1]
result = query_for_count(dev_query = dev_query, gtw_query = gtw_query, asset_type = asset_type)
counts = defaultdict(lambda: {'name' : None, 'count' : 0})
    if asset_type != 'none':
for row in result:
if len(row) != len(range_names):
log.error(f"Length of range_names and length of row in packet loss query result don't match ({len(range_names)}, {len(row)})")
raise Exception()
for i in range(0, len(row)):
name = range_names[i]
L = range_limits[i]
R = range_limits[i+1]
counts[(L,R)]['name'] = name
counts[(L,R)]['count'] += row[i]
else:
for i in range(0, len(range_names)):
name = range_names[i]
L = range_limits[i]
R = range_limits[i+1]
counts[(L,R)]['name'] = name
counts[(L,R)]['count'] = 0
return [{'id' : k, 'name':v['name'], 'count':v['count']} for k, v in counts.items()]
|
bc202c8e0e77921f74281ff58857659279dba8f7
| 31,738 |
import re
def _slug_strip(value, separator=None):
"""
Cleans up a slug by removing slug separator characters that occur at the
beginning or end of a slug.
If an alternate separator is used, it will also replace any instances of
the default '-' separator with the new separator.
"""
if separator == '-' or not separator:
re_sep = '-'
else:
re_sep = '(?:-|%s)' % re.escape(separator)
        value = re.sub('%s+' % re_sep, separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
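# Illustrative usage (added sketch, not part of the original source): with the
# default separator only leading/trailing '-' runs are stripped; with an
# alternate separator, '-' runs are also replaced by it.
assert _slug_strip('--hello-world--') == 'hello-world'
assert _slug_strip('--hello--world--', separator='_') == 'hello_world'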
|
ade4274643191ee702fe39ccefccc5d68ed3a8cb
| 31,741 |
def get_segments_loudness_max(h5, songidx=0):
"""
Get segments loudness max array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_loudness_max[h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx]:]
return h5.root.analysis.segments_loudness_max[h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx]:
h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx + 1]]
|
a65111b565686a57add325cc4c29d16b37aa89e8
| 31,742 |
def take_along_axis(arr, indices, axis):
"""
Takes values from the input array by matching 1d index and data slices.
This iterates over matching 1d slices oriented along the specified axis in the
index and data arrays, and uses the former to look up values in the latter.
These slices can be different lengths.
Args:
arr (Tensor): Source array with shape `(Ni…, M, Nk…)`.
indices (Tensor): Indices with shape `(Ni…, J, Nk…)` to take along each 1d
slice of `arr`. This must match the dimension of `arr`, but dimensions `Ni`
and `Nj` only need to broadcast against `arr`.
axis (int): The axis to take 1d slices along. If `axis` is None, the input
array is treated as if it had first been flattened to 1d.
Returns:
Tensor, the indexed result, with shape `(Ni…, J, Nk…)`.
Raises:
ValueError: If input array and indices have different number of dimensions.
TypeError: If the input is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Example:
>>> import mindspore.numpy as np
>>> x = np.arange(12).reshape(3, 4)
>>> indices = np.arange(3).reshape(1, 3)
>>> output = np.take_along_axis(x, indices, 1)
>>> print(output)
[[ 0 1 2]
[ 4 5 6]
[ 8 9 10]]
"""
_check_input_tensor(arr, indices)
if axis is None:
arr = ravel(arr)
axis = 0
ndim = F.rank(arr)
if ndim != F.rank(indices):
_raise_value_error('`indices` and `arr` must have the same number of dimensions')
axis = _check_axis_in_range(axis, ndim)
shape_arr = F.shape(arr)
shape_indices = F.shape(indices)
# broadcasts indices against the shape of arr except at axis
indices = _broadcast_to(indices, _tuple_slice(shape_indices, None, axis),
_tuple_slice(shape_arr, None, axis), ndim)
indices = _broadcast_to(indices, _tuple_slice(shape_arr, None, axis + 1) +
_tuple_slice(shape_indices, axis + 1, None), shape_arr, ndim)
return F.gather_d(arr, axis, indices)
|
84492b7ac09b26510dfe3851122587a702753b04
| 31,743 |
def _is_possible_grab(grid_world, agent_id, object_id, grab_range, max_objects):
""" Private MATRX method.
Checks if an :class:`matrx.objects.env_object.EnvObject` can be
grabbed by an agent.
Parameters
----------
grid_world : GridWorld
The :class:`matrx.grid_world.GridWorld` instance in which the
object is sought according to the `object_id` parameter.
agent_id : str
The string representing the unique identified that represents the
agent performing this action.
object_id : str
Optional. Default: ``None``
The string representing the unique identifier of the
:class:`matrx.objects.env_object.EnvObject` that should be
grabbed. When not given, a random object within range is selected.
grab_range : int
Optional. Default: ``np.inf``
The range in which the to be grabbed
:class:`matrx.objects.env_object.EnvObject` should be in.
max_objects : int
Optional. Default: ``np.inf``
The maximum of objects the agent can carry.
Returns
-------
GrabObjectResult
Depicts the action's expected success or failure and reason for
that result.
Can contain the following results:
* RESULT_SUCCESS: When the object can be successfully grabbed.
* RESULT_NO_OBJECT : When `object_id` is not given.
* RESULT_CARRIES_OBJECT: When the agent already carries the maximum
nr. objects.
* NOT_IN_RANGE: When `object_id` not within range.
* RESULT_AGENT: If the `object_id` is that of an agent.
* RESULT_OBJECT_CARRIED: When the object is already carried by
another agent.
* RESULT_OBJECT_UNMOVABLE: When the object is not movable.
* RESULT_UNKNOWN_OBJECT_TYPE: When the `object_id` does not exists
in the :class:`matrx.grid_world.GridWorld`.
"""
reg_ag = grid_world.registered_agents[agent_id] # Registered Agent
loc_agent = reg_ag.location # Agent location
if object_id is None:
return GrabObjectResult(GrabObjectResult.RESULT_NO_OBJECT, False)
# Already carries an object
if len(reg_ag.is_carrying) >= max_objects:
return GrabObjectResult(GrabObjectResult.RESULT_CARRIES_OBJECT, False)
# Go through all objects at the desired locations
objects_in_range = grid_world.get_objects_in_range(loc_agent, object_type="*", sense_range=grab_range)
objects_in_range.pop(agent_id)
# Set random object in range
if not object_id:
# Remove all non objects from the list
for obj in list(objects_in_range.keys()):
if obj not in grid_world.environment_objects.keys():
objects_in_range.pop(obj)
# Select a random object
if objects_in_range:
object_id = grid_world.rnd_gen.choice(list(objects_in_range.keys()))
else:
return GrabObjectResult(GrabObjectResult.NOT_IN_RANGE, False)
# Check if object is in range
if object_id not in objects_in_range:
return GrabObjectResult(GrabObjectResult.NOT_IN_RANGE, False)
# Check if object_id is the id of an agent
if object_id in grid_world.registered_agents.keys():
# If it is an agent at that location, grabbing is not possible
return GrabObjectResult(GrabObjectResult.RESULT_AGENT, False)
# Check if it is an object
if object_id in grid_world.environment_objects.keys():
env_obj = grid_world.environment_objects[object_id] # Environment object
# Check if the object is not carried by another agent
if len(env_obj.carried_by) != 0:
return GrabObjectResult(GrabObjectResult.RESULT_OBJECT_CARRIED.replace("{AGENT_ID}",
str(env_obj.carried_by)),
False)
elif not env_obj.properties["is_movable"]:
return GrabObjectResult(GrabObjectResult.RESULT_OBJECT_UNMOVABLE, False)
else:
# Success
return GrabObjectResult(GrabObjectResult.RESULT_SUCCESS, True)
else:
return GrabObjectResult(GrabObjectResult.RESULT_UNKNOWN_OBJECT_TYPE, False)
|
a57d120747199b84b3047d822547b5367d2b9905
| 31,744 |
def euclidean_distance_matrix(embeddings):
"""Get euclidean distance matrix based on embeddings
Args:
embeddings (:obj:`numpy.ndarray`): A `ndarray` of shape
`[num_sensors, dim]` that translates each sensor into a vector
embedding.
Returns:
A `ndarray` of shape `[num_sensors, num_sensors]` where each element is
the euclidean distance between two sensors.
"""
num_sensors = embeddings.shape[0]
distance_matrix = np.zeros((num_sensors, num_sensors), dtype=np.float32)
for i in range(num_sensors):
for j in range(num_sensors):
distance_matrix[i, j] = distance.euclidean(
embeddings[i, :], embeddings[j, :]
)
return distance_matrix
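# Illustrative usage (added sketch). The function above assumes `numpy as np`
# and `from scipy.spatial import distance` are in scope; two embeddings forming
# a 3-4-5 triangle give a distance of 5.
import numpy as np
from scipy.spatial import distance

embeddings = np.array([[0.0, 0.0],
                       [3.0, 4.0]])
print(euclidean_distance_matrix(embeddings))
# [[0. 5.]
#  [5. 0.]]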
|
5b50248a94eb926078a20fd5efbac83f115c26b0
| 31,745 |
def get_actor_id(name):
"""
Get TMDB id for an actor based on their name.
If more than one result (likely), fetches the
first match. TMDB results are sorted by popularity,
so first match is likely to be the one wanted.
"""
search = tmdb.Search()
search.person(query=name)
# get id of first result
tmdb_id = search.results[0]['id']
return tmdb_id
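# Illustrative usage (added sketch): assumes the `tmdbsimple` package imported
# as `tmdb` and a valid TMDB API key; "Tom Hanks" is just an example query.
import tmdbsimple as tmdb

tmdb.API_KEY = "<your-tmdb-api-key>"
print(get_actor_id("Tom Hanks"))  # TMDB person id of the most popular match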
|
ac75cbaac7dec85fd965d8cc421c6bbba8fc5f67
| 31,746 |
import json
def generate_prompt(
test_case_path, prompt_path, solutions_path, tokenizer, starter_path=None
):
"""
Generate a prompt for a given test case.
Original version from https://github.com/hendrycks/apps/blob/main/eval/generate_gpt_codes.py#L51.
"""
_input = "\nQUESTION:\n"
with open(prompt_path, "r") as f:
data = f.readlines()
data = "".join(data)
_input += data
if starter_path != None:
with open(starter_path, "r") as f:
data = f.readlines()
data = "".join(data)
data = "\n" + data # + "\n"
_input += data
else:
# _input += "\n\n"
pass
with open(test_case_path, "r") as f:
data = json.load(f)
if not data.get("fn_name"):
_input += "\nUse Standard Input format" # \n"
else:
_input += "\nUse Call-Based format" # \n"
_input += "\nANSWER:\n"
return _input
|
ecd3218839b346741e5beea8ec7113ea2892571e
| 31,747 |
def projects_upload_to(instance, filename):
"""construct path to uploaded project archives"""
today = timezone.now().strftime("%Y/%m")
return "projects/{date}/{slug}/{filename}".format(
date=today, slug=instance.project.slug, filename=filename)
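# A minimal model sketch (illustrative; the model and field names are
# assumptions): `projects_upload_to` is meant to be passed as `upload_to` on a
# FileField whose model has a `project` foreign key exposing a `slug`. The
# function itself assumes `from django.utils import timezone` is in scope.
from django.db import models
from django.utils import timezone  # used by projects_upload_to above

class ProjectArchive(models.Model):
    project = models.ForeignKey('Project', on_delete=models.CASCADE)
    archive = models.FileField(upload_to=projects_upload_to)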
|
01f97cf5994cca7265ede0a4b5c73672f61e2f90
| 31,748 |
def assign_employee(id):
"""
Assign a department and a role to an employee
"""
check_admin()
employee = Employee.query.get_or_404(id)
form = EmployeeAssignForm(obj=employee)
    if form.validate_on_submit():
        employee.department = form.department.data
        employee.role = form.role.data
        db.session.add(employee)
        db.session.commit()
        flash('You have successfully assigned a department and role.')
        # redirect to the roles page
        return redirect(url_for('admin.list_employees'))
return render_template('admin/employees/employee_assign.html',
employee=employee, form=form,
title='Assign Employee')
|
f88a1b49cadf73d8a62c0be23742fd03cace36cd
| 31,749 |
import copy
def autofocus(field, nm, res, ival, roi=None,
metric="average gradient", minimizer="lmfit",
minimizer_kwargs=None, padding=True, num_cpus=1):
"""Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field: 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm: float
Refractive index of medium.
res: float
Size of wavelength in pixels.
ival: tuple of floats
Approximate interval to search for optimal focus in px.
roi: rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric: str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
minimizer: str
- "lmfit" : lmfit-based minimizer
- "legacy" : only use for reproducing old results
minimizer_kwargs: dict
Optional keyword arguments to the `minimizer` function
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
num_cpus: int
Not implemented.
Returns
-------
d, field [, other]:
The focusing distance, the field, and optionally any other
data returned by the minimizer (specify via `minimizer_kwargs`).
Notes
-----
This method uses :class:`nrefocus.RefocusNumpy` for refocusing
of 2D fields. This is because the :func:`nrefocus.refocus_stack`
function uses `async` which appears to not work with e.g.
:mod:`pyfftw`.
"""
fshape = len(field.shape)
if fshape == 1:
# 1D field
rfcls = iface.RefocusNumpy1D
elif fshape == 2:
# 2D field
rfcls = iface.RefocusNumpy
else:
raise AssertionError("Dimension of `field` must be 1 or 2.")
if minimizer_kwargs is None:
minimizer_kwargs = {}
else:
minimizer_kwargs = copy.deepcopy(minimizer_kwargs)
# use a made-up pixel size so we can use the new `Refocus` interface
pixel_size = 1
rf = rfcls(field=field,
wavelength=res*pixel_size,
pixel_size=pixel_size,
medium_index=nm,
distance=0,
kernel="helmholtz",
padding=padding
)
data = rf.autofocus(metric=metric,
minimizer=minimizer,
interval=np.array(ival)*rf.pixel_size,
roi=roi,
minimizer_kwargs=minimizer_kwargs,
ret_grid=False,
ret_field=True,
)
return data
|
a954f96cf8c3c16dbdfb9ea31c22e61cac2a9245
| 31,750 |
def ZeroPaddedRoundsError(handler=None):
"""error raised if hash was recognized but contained zero-padded rounds field"""
return MalformedHashError(handler, "zero-padded rounds")
|
b0ff8bb894505041382aaf2d79f027708c7a2134
| 31,751 |
def get_adj_mat(G):
"""Represent ppi network as adjacency matrix
Parameters
----------
G : networkx graph
ppi network, see get_ppi()
Returns
-------
adj : square sparse scipy matrix
(i,j) has a 1 if there is an interaction reported by irefindex
ids : list
same length as adj, ith index contains irefindex unique identifier for gene whose interactions are
reported in the ith row of adj
"""
ids = G.nodes()
adj = nx.to_scipy_sparse_matrix(G, nodelist=ids, dtype=bool)
return adj, ids
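# Illustrative usage (added sketch): `nx.to_scipy_sparse_matrix` was removed in
# NetworkX 3.0, so this assumes an older NetworkX release.
import networkx as nx

G = nx.Graph()
G.add_edges_from([('geneA', 'geneB'), ('geneB', 'geneC')])
adj, ids = get_adj_mat(G)
print(adj.shape, list(ids))  # (3, 3) ['geneA', 'geneB', 'geneC']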
|
95ee8df6be45f12df8da93c7fed10a3c8a32a058
| 31,752 |
def load_ndarray_list(fname):
"""Load a list of arrays saved by `save_ndarray_list`.
Parameters
----------
fname : string
filename to load.
Returns
-------
la : list of np.ndarrays
The list of loaded numpy arrays. This should be identical tp
what was saved by `save_ndarray_list`.
"""
d1 = np.load(fname)
    # NpzFile has no `iteritems` on Python 3; `items()` works on current numpy.
    la = [v for i, v in sorted(d1.items(), key=lambda kv: int(kv[0]))]
return la
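# A sketch of the assumed counterpart `save_ndarray_list` (not shown in the
# original source): it presumably stores each array under its list index, so
# the integer-keyed sort above restores the original order. The loader assumes
# `numpy as np` is in scope.
import numpy as np

def save_ndarray_list(fname, la):
    np.savez(fname, **{str(i): arr for i, arr in enumerate(la)})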
|
fc76373d45c8934bd81d6ac7e144dff97ae6d1c9
| 31,753 |
def save_result(data, format, options=UNSET) -> ProcessBuilder:
"""
Save processed data to storage
:param data: The data to save.
:param format: The file format to save to. It must be one of the values that the server reports as
supported output file formats, which usually correspond to the short GDAL/OGR codes. If the format is not
suitable for storing the underlying data structure, a `FormatUnsuitable` exception will be thrown. This
parameter is *case insensitive*.
:param options: The file format parameters to be used to create the file(s). Must correspond to the
parameters that the server reports as supported parameters for the chosen `format`. The parameter names and
valid values usually correspond to the GDAL/OGR format options.
:return: `false` if saving failed, `true` otherwise.
"""
return process('save_result', data=data, format=format, options=options)
|
be9e8f36869cbe2fdf7b938dfd59cf7b8743ff2a
| 31,754 |
def load_suites_from_classes(classes):
# type: (Sequence[Any]) -> List[Suite]
"""
Load a list of suites from a list of classes.
"""
return list(
filter(
lambda suite: not suite.hidden, map(load_suite_from_class, classes)
)
)
|
6c4b45c7ab99a3e3f7742f247ea65552c7c70927
| 31,755 |
import torch
def update(quantized_model, distilD):
"""
Update activation range according to distilled data
    quantized_model: a quantized model whose activation range is to be updated
distilD: distilled data
"""
    print('******updating BN stats...', end='')
with torch.no_grad():
for batch_idx, inputs in enumerate(distilD):
if isinstance(inputs, list):
inputs = inputs[0]
inputs = inputs.cuda()
outputs = quantized_model(inputs)
print(' Finished******')
return quantized_model
|
70e4cd9032e12f1f461c1cd13ac81ead06091728
| 31,756 |
def _get_color(value):
"""To make positive DFCs plot green, negative DFCs plot red."""
green, red = sns.color_palette()[2:4]
if value >= 0:
return green
return red
|
888edb2307bd6f4da65c6c1d5c0a40cb146dfa8c
| 31,758 |
def parse_feed(feed: str) -> list:
"""
Parses a TV Show *feed*, returning the episode files included in that feed.
:param feed: the feed to parse
:return: list of episode files included in *feed*
"""
try:
root = ElementTree.fromstring(feed)
except ElementTree.ParseError as error:
raise ParseError(str(error))
channel = root.find('channel')
if channel is None:
raise ParseError("feed's format is invalid: missing 'channel' element")
# This function is used multiple times in the loop below
    def attr(item, attribute):
        """ Fetches the text of *attribute* from *item* element """
        element = item.find(attribute)
        if element is None:
            raise ParseError(f"item is missing required attribute {attribute}")
        return element.text
files = []
for item in channel.findall('item'):
item = {
"title": attr(item, 'title'),
"link": attr(item, 'link')
}
files.append(parse_item(item))
return files
|
6fec9c1d71d3ae31480103dd2e6a2f5d81e12287
| 31,759 |
def copy_emb_weights(embedding, idx2word, embedding_weights, emb_index_dict, vocab_size):
"""Copy from embs weights of words that appear in our short vocabulary (idx2word)."""
c = 0
for i in range(vocab_size):
w = idx2word[i]
g = emb_index_dict.get(w, emb_index_dict.get(w.lower()))
        if g is None and w.startswith('#'):  # glove has no hashtags (I think...)
w = w[1:]
g = emb_index_dict.get(w, emb_index_dict.get(w.lower()))
if g is not None:
embedding[i, :] = embedding_weights[g, :]
c += 1
print('number of tokens, in small vocab: {:,} found in embeddings and copied to embedding: {:.4f}'.format(c, c / float(vocab_size)))
return embedding
|
e5d361efd342cc7e194ee325fdf4a98831121576
| 31,762 |
def cost_aggregation(c_v, max_d):
"""
    The semi-global matching aggregation recurrence along a path direction r:
    Lr(p,d) = C(p,d)
            + min[Lr(p-r, d), Lr(p-r, d-1) + P1, Lr(p-r, d+1) + P1, min_i Lr(p-r, i) + P2]
            - min_k Lr(p-r, k)
    :param c_v: cost volume of shape (H, W, D)
    :param max_d: maximum disparity (number of disparity levels D)
    :return: sum of the Lr volumes over all aggregated directions
"""
(H, W, D) = c_v.shape
p1 = 10
p2 = 120
Lr1 = np.zeros((H, W, D), np.uint32) # from up
Lr2 = np.zeros((H, W, D), np.uint32) # from left
Lr3 = np.zeros((H, W, D), np.uint32) # from right
Lr4 = np.zeros((H, W, D), np.uint32) # from down
# Lr5 = np.zeros((H, W, D), np.uint32) # from up_left
print('agg from up started.')
Lr1[0, :, :] = c_v[0, :, :] # border, first row
for r in range(1, H):
for d in range(0, max_d):
Lr1_1 = np.squeeze(Lr1[r - 1, :, d])
if d != 0: # disparity is not the bottom
Lr1_2 = np.squeeze(Lr1[r - 1, :, d - 1] + p1)
else:
Lr1_2 = np.squeeze(Lr1_1 + p1)
if d != max_d - 1:
Lr1_3 = np.squeeze(Lr1[r - 1, :, d + 1] + p1)
else:
Lr1_3 = np.squeeze(Lr1_1 + p1)
Lr1_4 = np.squeeze(np.min(Lr1[r - 1, :, :], axis=1) + p2)
Lr1_5 = np.min(Lr1[r - 1, :, :], axis=1)
Lr1[r, :, d] = c_v[r, :, d] + np.min(np.vstack([Lr1_1, Lr1_2, Lr1_3, Lr1_4]), axis=0) - Lr1_5
print('agg from left started.')
Lr2[:, 0, :] = c_v[:, 0, :] # border, first column
for c in range(1, W):
for d in range(0, max_d):
Lr2_1 = np.squeeze(Lr2[:, c - 1, d])
if d != 0: # disparity is not the bottom
Lr2_2 = np.squeeze(Lr2[:, c - 1, d - 1] + p1)
else:
Lr2_2 = np.squeeze(Lr2_1 + p1)
if d != max_d - 1:
Lr2_3 = np.squeeze(Lr2[:, c - 1, d + 1] + p1)
else:
Lr2_3 = np.squeeze(Lr2_1 + p1)
Lr2_4 = np.squeeze(np.min(Lr2[:, c - 1, :], axis=1) + p2)
Lr2_5 = np.min(Lr2[:, c - 1, :], axis=1)
Lr2[:, c, d] = c_v[:, c, d] + np.min(np.vstack([Lr2_1, Lr2_2, Lr2_3, Lr2_4]), axis=0) - Lr2_5
print('agg from right started.')
    Lr3[:, -1, :] = c_v[:, -1, :]  # border, last column
for c in range(W - 2, -1, -1):
for d in range(0, max_d):
Lr3_1 = np.squeeze(Lr3[:, c + 1, d])
if d != 0: # disparity is not the bottom
Lr3_2 = np.squeeze(Lr3[:, c + 1, d - 1] + p1)
else:
Lr3_2 = np.squeeze(Lr3_1 + p1)
if d != max_d - 1:
Lr3_3 = np.squeeze(Lr3[:, c + 1, d + 1] + p1)
else:
Lr3_3 = np.squeeze(Lr3_1 + p1)
Lr3_4 = np.squeeze(np.min(Lr3[:, c + 1, :], axis=1) + p2)
Lr3_5 = np.min(Lr3[:, c + 1, :], axis=1)
Lr3[:, c, d] = c_v[:, c, d] + np.min(np.vstack([Lr3_1, Lr3_2, Lr3_3, Lr3_4]), axis=0) - Lr3_5
print('agg from down started.')
    Lr4[-1, :, :] = c_v[-1, :, :]  # border, last row
for r in range(H - 2, -1, -1):
for d in range(0, max_d):
Lr4_1 = np.squeeze(Lr4[r + 1, :, d])
if d != 0: # disparity is not the bottom
Lr4_2 = np.squeeze(Lr4[r + 1, :, d - 1] + p1)
else:
Lr4_2 = np.squeeze(Lr4_1 + p1)
if d != max_d - 1:
Lr4_3 = np.squeeze(Lr4[r + 1, :, d + 1] + p1)
else:
Lr4_3 = np.squeeze(Lr4_1 + p1)
Lr4_4 = np.squeeze(np.min(Lr4[r + 1, :, :], axis=1) + p2)
Lr4_5 = np.min(Lr4[r + 1, :, :], axis=1)
Lr4[r, :, d] = c_v[r, :, d] + np.min(np.vstack([Lr4_1, Lr4_2, Lr4_3, Lr4_4]), axis=0) - Lr4_5
# print('agg from up-left started')
# Lr5[0, :, :] = c_v[0, :, :]
# for c in range(1, W):
# for d in range(0, max_d):
# if c <= W - H - 1: # The path does not need to be split.
# for x, y in zip(range(c, W, 1), range(0, H, 1)):
# Lr5_1 = Lr5[y - 1, x - 1, d]
# if d != 0:
# Lr5_2 = Lr5[y - 1, x - 1, d - 1] + p1
# else:
# Lr5_2 = Lr5_1 + p1
# if d != max_d - 1:
# Lr5_3 = Lr5[y - 1, x - 1, d + 1] + p1
# else:
# Lr5_3 = Lr5_1 + p1
# Lr5_4 = np.min(Lr5[y - 1, x - 1, :], axis=0) + p2
# Lr5_5 = np.min(Lr5[y - 1, x - 1, :], axis=0)
#
# Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5
# else: # the pass needs to be split
# for x, y in zip(range(c, W, 1), range(0, W - c, 1)): # first part
# Lr5_1 = Lr5[y - 1, x - 1, d]
# if d != 0:
# Lr5_2 = Lr5[y - 1, x - 1, d - 1] + p1
# else:
# Lr5_2 = Lr5_1 + p1
# if d != max_d - 1:
# Lr5_3 = Lr5[y - 1, x - 1, d + 1] + p1
# else:
# Lr5_3 = Lr5_1 + p1
# Lr5_4 = np.min(Lr5[y - 1, x - 1, :], axis=0) + p2
# Lr5_5 = np.min(Lr5[y - 1, x - 1, :], axis=0)
#
# Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5
# for x, y in zip(range(0, W, 1), range(W - c, H, 1)): # second part
# if x == 0: # the head
# Lr5_1 = Lr5[y - 1, W - 1, d]
# if d != 0:
# Lr5_2 = Lr5[y - 1, W - 1, d - 1] + p1
# else:
# Lr5_2 = Lr5_1 + p1
# if d != max_d - 1:
# Lr5_3 = Lr5[y - 1, W - 1, d + 1] + p1
# else:
# Lr5_3 = Lr5_1 + p1
# Lr5_4 = np.min(Lr5[y - 1, W - 1, :], axis=0) + p2
# Lr5_5 = np.min(Lr5[y - 1, W - 1, :], axis=0)
#
# Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5
# else:
# Lr5_1 = Lr5[y - 1, x - 1, d]
# if d != 0:
# Lr5_2 = Lr5[y - 1, x - 1, d - 1] + p1
# else:
# Lr5_2 = Lr5_1 + p1
# if d != max_d - 1:
# Lr5_3 = Lr5[y - 1, x - 1, d + 1] + p1
# else:
# Lr5_3 = Lr5_1 + p1
# Lr5_4 = np.min(Lr5[y - 1, x - 1, :], axis=0) + p2
# Lr5_5 = np.min(Lr5[y - 1, x - 1, :], axis=0)
#
# Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5
return Lr1 + Lr2 + Lr3 + Lr4
|
71ff7bbbea8c3faebc8269d707198240c258c9c3
| 31,763 |
def send_approved_resource_email(user, request, reason):
"""
Notify the user the that their request has been approved.
"""
email_template = get_email_template()
template = "core/email/resource_request_approved.html"
subject = "Your Resource Request has been approved"
context = {
"support_email": email_template.email_address,
"support_email_header": email_template.email_header,
"support_email_footer": email_template.email_footer,
"user": user.username,
"request": request,
"reason": reason
}
from_name, from_email = admin_address()
user_email = lookupEmail(user.username)
recipients = [email_address_str(user.username, user_email)]
sender = email_address_str(from_name, from_email)
return send_email_template(subject, template, recipients, sender,
context=context, cc=[sender])
|
52571470104e802ef9bcc491fa614a9420710df5
| 31,764 |
from sphinx.util.nodes import traverse_parent
import warnings
def is_in_section_title(node: Element) -> bool:
"""Determine whether the node is in a section title"""
warnings.warn('is_in_section_title() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
for ancestor in traverse_parent(node):
if isinstance(ancestor, nodes.title) and \
isinstance(ancestor.parent, nodes.section):
return True
return False
|
fb1e981e9ec8ad26cb49a144eb696d035dcbc2e8
| 31,765 |
def common_mgr():
"""
Create a base topology.
This uses the ExtendedNMLManager for it's helpers.
"""
# Create base topology
mgr = ExtendedNMLManager(name='Graphviz Namespace')
sw1 = mgr.create_node(identifier='sw1', name='My Switch 1')
sw2 = mgr.create_node(identifier='sw2', name='My Switch 2')
assert mgr.get_object('sw1') is not None
assert mgr.get_object('sw2') is not None
sw1p1 = mgr.create_biport(sw1)
sw1p2 = mgr.create_biport(sw1)
sw1p3 = mgr.create_biport(sw1) # noqa
sw2p1 = mgr.create_biport(sw2)
sw2p2 = mgr.create_biport(sw2)
sw2p3 = mgr.create_biport(sw2) # noqa
sw1p1_sw2p1 = mgr.create_bilink(sw1p1, sw2p1) # noqa
sw1p2_sw2p2 = mgr.create_bilink(sw1p2, sw2p2) # noqa
return mgr
|
e181b231bc859a6595417bbc63b695a00d7c3ae7
| 31,766 |
def reshape_array_h5pyfile(array,number_of_gatesequences,datapoints_per_seq):
"""reshaping function"""
    # Order is important: for data stored as columns use order='F',
    # which gives a number_of_gatesequences x datapoints_per_seq matrix.
    new_array = np.reshape(array, (number_of_gatesequences, datapoints_per_seq), order='F')
return new_array
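# Illustrative example (added sketch): column-major ('F') order fills the
# reshaped matrix column by column, i.e. one gate sequence per row here.
import numpy as np

data = np.arange(6)                      # [0 1 2 3 4 5]
print(reshape_array_h5pyfile(data, 3, 2))
# [[0 3]
#  [1 4]
#  [2 5]]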
|
c05edd9963396362f3f36f4293e28eb05fe22359
| 31,767 |
def build_value_counts_query(table: str,
categorical_column: str,
limit: int):
"""
Examples:
SELECT
{column_name},
COUNT (*) as frequency
FROM
`{table}`
WHERE
{not_null_string}
GROUP BY
{column_name}
ORDER BY
frequency DESC
LIMIT {limit}
Args:
table: (string), full path of the table
        categorical_column: (string), name of the categorical column
limit: (int), return the top counts
Returns:
string
"""
template = query_templates.VALUE_COUNTS_TEMPLATE
not_null_string = _build_not_null_string([categorical_column])
query = template.format(
table=table,
column_name=categorical_column,
limit=limit,
not_null_string=not_null_string
)
return query
|
605e25310e3c91c693d72e7e4eae3c513cea2a8b
| 31,768 |
from pathlib import Path
def get_model_benchmarks_data(benchmark_runlogs_filepath: Path):
"""
Return Python dict with summary of model performance for one choice of
training set size.
"""
benchmark_genlog = read_json(benchmark_runlogs_filepath)
benchmark_runlog = read_json(benchmark_runlogs_filepath.parent / "runlog.json")
assert benchmark_runlog["out.status"] == "SUCCESS"
return {
"pipeline_run_id": benchmark_runlog["parameters.pipeline_run_id"],
"nr_train_images": benchmark_runlog["parameters.task.nr_train_images"],
"runtime_ms": benchmark_runlog["out.timing.duration_ms"],
"roc_auc": benchmark_genlog["key-values"]["roc_auc_class_mean"],
}
|
494e94a371a682e84f211204b189f3d17727f1c0
| 31,769 |
import collections
def make_labels(module_path, *names, **names_labels):
"""Make a namespace of labels."""
return collections.Namespace(
*((name, Label(module_path, name)) for name in names),
*((n, l if isinstance(l, Label) else Label(module_path, l))
for n, l in names_labels.items()),
)
|
aaf0d204442bb9b712c2cf17babe45fd46905c8d
| 31,770 |
import socket
def check_port_occupied(port, address="127.0.0.1"):
"""Check if a port is occupied by attempting to bind the socket and returning any resulting error.
:return: socket.error if the port is in use, otherwise False
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((address, port))
except socket.error as e:
return e
finally:
s.close()
return False
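# Illustrative usage (added sketch): bind a listener first so the check
# reports the port as occupied; once it is closed the check usually succeeds.
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(("127.0.0.1", 0))      # let the OS pick a free port
port = listener.getsockname()[1]
print(check_port_occupied(port))     # a socket.error, since the port is bound
listener.close()
print(check_port_occupied(port))     # usually False once the listener is closed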
|
4bf302f89793df47a28cb2bd608abd4344f40ff2
| 31,771 |
def test_heterogeneous_multiagent_env(
common_config,
pf_config,
multicomponent_building_config,
pv_array_config,
ev_charging_config
):
"""Test multiagent env with three heterogeneous agents."""
building_agent ={
"name": "building",
"bus": "675c",
"cls": MultiComponentEnv,
"config": {"components": multicomponent_building_config}
}
# Next, build the PV and EV charging envs
ev_agent = {
"name": "ev-charging",
"bus": "675c",
"cls": EVChargingEnv,
"config": ev_charging_config
}
pv_agent = {
"name": "pv",
"bus": "675c",
"cls": PVEnv,
"config": pv_array_config
}
agents = [building_agent, ev_agent, pv_agent]
env_config = {
"common_config": common_config,
"pf_config": pf_config,
"agents": agents
}
env = MultiAgentEnv(**env_config)
return multi_agent_episode_runner(env)
|
75939f0e548001ab34f411d15ae822cc7b1c1790
| 31,772 |
import requests
from bs4 import BeautifulSoup
def find_sublinks(artist_link):
"""Some artists have that many songs so we have multiple pages for them. This functions finds all subpages for given artist
e.g if we have page freemidi/queen_1 script go on that page and seek for all specific hyperlinks.
as a return we could get [freemidi/queen_1, freemidi/queen_2, ...., freemidi/queen_n]
Args:
artist_link (str): link to the home page of the artist
Returns:
_type_: list of all pages with songs that can be reached from the artist_link
"""
links = [artist_link]
URL = f"https://freemidi.org/{artist_link}" # as it's written it works only for freemidi page
artist_page = requests.get(URL)
artist_soup = BeautifulSoup(artist_page.content, "html.parser")
#So we iterate over all specific hyperlinks, and add them to the list
for a in artist_soup.find(class_="pagination").find_all("a"):
link = a["href"]
if link != "#":
links.append(link)
return links
|
3b24623d1cdbf4bf83f92a6e741576ff74e3facb
| 31,773 |
from unittest.mock import patch
def class_mock(request, q_class_name, autospec=True, **kwargs):
"""Return mock patching class with qualified name *q_class_name*.
The mock is autospec'ed based on the patched class unless the optional
argument *autospec* is set to False. Any other keyword arguments are
passed through to Mock(). Patch is reversed after calling test returns.
"""
_patch = patch(q_class_name, autospec=autospec, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
08bd1aacf75784668845ace13af6514461850d1a
| 31,774 |
import inspect
def kwargs_only(fn):
"""Wraps function so that callers must call it using keyword-arguments only.
Args:
fn: fn to wrap.
Returns:
Wrapped function that may only be called using keyword-arguments.
"""
if hasattr(inspect, 'getfullargspec'):
# For Python 3
args = inspect.getfullargspec(fn)
varargs = args.varargs
keywords = args.varkw
else:
# For Python 2
args = inspect.getargspec(fn) # pylint: disable=deprecated-method
varargs = args.varargs
keywords = args.keywords
if varargs is not None:
raise TypeError('function to wrap should not have *args parameter')
if keywords is not None:
raise TypeError('function to wrap should not have **kwargs parameter')
arg_list = args.args
has_default = [False] * len(arg_list)
default_values = [None] * len(arg_list)
has_self = arg_list[0] == 'self'
if args.defaults:
has_default[-len(args.defaults):] = [True] * len(args.defaults)
default_values[-len(args.defaults):] = args.defaults
def wrapped_fn(*args, **kwargs):
"""Wrapped function."""
if args:
if not has_self or (has_self and len(args) != 1):
raise TypeError('function %s must be called using keyword-arguments '
'only.' % fn.__name__)
if has_self:
if len(args) != 1:
raise TypeError('function %s has self argument but not called with '
'exactly 1 positional argument' % fn.__name__)
kwargs['self'] = args[0]
kwargs_to_pass = {}
for arg_name, arg_has_default, arg_default_value in zip(
arg_list, has_default, default_values):
if not arg_has_default and arg_name not in kwargs:
raise TypeError('function %s must be called with %s specified' %
(fn.__name__, arg_name))
kwargs_to_pass[arg_name] = kwargs.pop(arg_name, arg_default_value)
if kwargs:
raise TypeError('function %s called with extraneous kwargs: %s' %
(fn.__name__, kwargs.keys()))
return fn(**kwargs_to_pass)
return wrapped_fn
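# Usage sketch (illustrative, not from the original source): the wrapper
# rejects positional calls and fills in defaults for omitted keyword arguments.
@kwargs_only
def scale(value, factor=2):
    return value * factor

assert scale(value=3) == 6   # keyword call works, default for `factor` applied
try:
    scale(3)                 # positional call is rejected
except TypeError:
    pass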
|
cc5bb7d4d31d1bb392c306410c3c22267e93e891
| 31,775 |
def _labeling_complete(labeling, G):
"""Determines whether or not LPA is done.
Label propagation is complete when all nodes have a label that is
in the set of highest frequency labels amongst its neighbors.
Nodes with no neighbors are considered complete.
"""
return all(labeling[v] in _most_frequent_labels(v, labeling, G)
for v in G if len(G[v]) > 0)
|
130454cbb4a3bc77dfb94f97f20ad11e3239fb82
| 31,776 |
import _random
def get_random_id_str(alphabet=None):
""" Get random integer and encode it to URL-safe string. """
if not alphabet:
alphabet = BASE62
n = _random(RANDOM_ID_SOURCE_BYTES)
return int2str(n, len(alphabet), alphabet)
|
51d27c2838ccdd506e23aa2e707ac304e80c249c
| 31,777 |
def multivariate_normal_pdf(x, mean, cov):
"""Unnormalized multivariate normal probability density function."""
# Convert to ndarray
x = np.asanyarray(x)
mean = np.asanyarray(mean)
cov = np.asarray(cov)
# Deviation from mean
dev = x - mean
if isinstance(dev, np.ma.MaskedArray):
if np.all(np.ma.getmaskarray(dev)):
return np.ones(dev.shape[:-1])
else:
dev = np.ma.getdata(dev) * ~np.ma.getmaskarray(dev)
# Broadcast cov, if needed
if cov.ndim <= dev.ndim:
extra_dim = 1 + dev.ndim - cov.ndim
cov = np.broadcast_to(cov, (1,) * extra_dim + cov.shape)
exponent = -0.5 * np.einsum('...i,...i', dev, np.linalg.solve(cov, dev))
return np.exp(exponent) / np.sqrt(np.linalg.det(cov))
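# Illustrative check (added sketch): for a full covariance, the result should
# match scipy's normalized pdf up to the constant (2*pi)**(d/2). Assumes
# `numpy as np` is in scope for the function above.
import numpy as np
from scipy.stats import multivariate_normal

x = np.array([0.5, -0.2])
mean = np.zeros(2)
cov = np.array([[1.0, 0.3], [0.3, 2.0]])
unnormalized = multivariate_normal_pdf(x, mean, cov)
normalized = multivariate_normal.pdf(x, mean=mean, cov=cov)
assert np.isclose(unnormalized, normalized * (2 * np.pi) ** (x.size / 2))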
|
ce3c9171ee7cf78660118ecdab1949efce402827
| 31,778 |
def remove_below(G, attribute, value):
""" Remove attribute below certain value
Parameters
----------
G : nx.graph
Graph
attribute : str
Attribute
value : float
Value
Returns
-------
G : nx.graph
Graph
"""
# Assertions
assert isinstance(G, nx.Graph), "G is not a NetworkX graph"
# Calculation
for node in G.nodes:
if G.nodes[node][attribute] > value:
G.nodes[node][attribute] = float('nan')
return G
|
8d60ce75d8334d5de52b877a9fcd6a9c826c7418
| 31,779 |
def format_header(header_values):
"""
Formats a row of data with bolded values.
:param header_values: a list of values to be used as headers
:return: a string corresponding to a row in enjin table format
"""
header = '[tr][td][b]{0}[/b][/td][/tr]'
header_sep = '[/b][/td][td][b]'
return header.format(header_sep.join(header_values))
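# Illustrative usage (added sketch): produces a single bolded Enjin-table header row.
row = format_header(['Rank', 'Player', 'Score'])
# '[tr][td][b]Rank[/b][/td][td][b]Player[/b][/td][td][b]Score[/b][/td][/tr]'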
|
5b7cd734a486959660551a6d915fbbf52ae7ef1e
| 31,780 |
import random
def getRandomWalk(initial_position: int, current_path: np.ndarray, adjacency_matrix: np.ndarray,
heuristic: np.ndarray, pheromone: np.ndarray, alpha: float, max_lim: int,
Q: float or None, R: float or None) -> np.ndarray:
"""
    Function that, given an array indicating the nodes already traversed (the path followed by an
    ant), a binary adjacency matrix indicating the structure of the graph to be traversed, and the
    parameter that regulates the stochastic choices the ant makes when choosing its
    movements (the alpha parameter, which controls the influence of the pheromone and
    heuristic values on the decisions taken by the ant), returns the array of nodes
    visited by the ant in the order in which they were visited.
Parameters
----------
initial_position: int
Integer indicating the initial position of the ant.
current_path: np.ndarray (nodes), dtype=np.int8
Array with nodes visited by the ant. The current_path argument must include the initial
position of the ant.
adjacency_matrix: np.ndarray (nodes, nodes), dtype=np.int8
Binary adjacency matrix defining the structure of the graph to be traversed.
heuristic: np.ndarray (nodes, nodes), dtype=np.float64
Heuristic information matrix used by the stochastic ant policy to decide the ant's
movements.
pheromone: np.ndarray (nodes, nodes), dtype=np.float64
Pheromone information matrix used by the stochastic ant policy to decide the ant's
movements. The parameters of this matrix will be updated throughout the interactions of
the algorithm.
alpha: float
Parameter that reference the influence of pheromones when the ant makes a decision on the
path through the walk being constructed.
max_lim: int
Maximum path length.
Q: float, default=None
Parameter that determines the probability of selecting the next move deterministically
by selecting the move to the node that has the highest probability. By default this
parameter will not be considered.
R: float, default=None
Parameter that determines the probability of selecting the next move randomly without
taking into account the computation of the pheromone matrix and heuristics. By default
this parameter will not be considered.
Returns
-------
:np.ndarray (nodes)
Array with the nodes visited by the ant arranged in the order in which they have been
visited.
"""
movements = getValidPaths(initial_position, current_path, adjacency_matrix)
n_partial_solutions = 1
# Add partial solutions to the current path as long as possible.
while len(movements) > 0 and n_partial_solutions < max_lim:
if len(movements) == 1:
mov = movements[0]
elif Q is not None and random.random() < Q: # Deterministic selection of the move
probs = stochasticAS(
initial_position, np.array(movements), heuristic, pheromone, alpha)
mov = movements[np.argmax(probs)]
elif R is not None and random.random() < R: # Random selection of the move
mov = random.choice(movements)
else: # Stochastic selection of the next move
probs = stochasticAS(
initial_position, np.array(movements), heuristic, pheromone, alpha)
mov = movements[rouletteWheel(probs, random.random())]
current_path = np.append(current_path, mov)
movements = getValidPaths(mov, current_path, adjacency_matrix)
initial_position = mov
n_partial_solutions += 1
return current_path
|
a19a33cea8aadd58d99f87bb3ef111ce33df1ce7
| 31,782 |
def sample_circle(plane="xy", N=100):
"""Define all angles in a certain plane."""
phi = np.linspace(0, 2 * np.pi, N)
if plane == "xy":
return np.array([np.cos(phi), np.sin(phi), np.ones_like(phi)])
elif plane == "xz":
return np.array([np.cos(phi), np.ones_like(phi), np.sin(phi)])
elif plane == "yz":
return np.array([np.ones_like(phi), np.cos(phi), np.sin(phi)])
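# Illustrative usage (added sketch): eight samples in the xy plane; the unused
# coordinate is filled with ones. Assumes `numpy as np` is in scope.
import numpy as np

pts = sample_circle(plane="xy", N=8)
print(pts.shape)                  # (3, 8)
print(np.allclose(pts[2], 1.0))   # True: the z row is all ones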
|
1546c3e74b5ef1f7d43fa3352a708b5f7acf03ae
| 31,783 |
from pathlib import Path
from datetime import datetime
def generate_today_word_cloud(path='images/'):
"""
generate today word cloud
Args:
path (str, optional): [description]. Defaults to 'images/'.
"""
terms_counts = get_term_count()
if terms_counts:
word_cloud = generate_word_cloud(terms_counts, drc_flag_color_map)
word_cloud_path = Path.cwd().joinpath(
path, 'word_cloud', datetime.today().strftime('%m-%d-%Y'))
word_cloud_path = "{}.png".format(word_cloud.__str__)
word_cloud.to_file(word_cloud_path)
return word_cloud_path
|
f606135b181235eba5df4367d8721ff73e98af48
| 31,784 |