content | sha1 | id
---|---|---|
def cuda_reshape(a, shape):
""" Reshape a GPUArray.
Parameters:
a (gpu): GPUArray.
shape (tuple): Dimension of new reshaped GPUArray.
Returns:
gpu: Reshaped GPUArray.
Examples:
>>> a = cuda_reshape(cuda_give([[1, 2], [3, 4]]), (4, 1))
array([[ 1.],
[ 2.],
[ 3.],
[ 4.]])
>>> type(a)
<class 'pycuda.gpuarray.GPUArray'>
"""
return a.reshape(shape)
|
966cae8aeb88aeaeada28a11c284920746771f00
| 23,881 |
def test_ep_basic_equivalence(stateful, state_tuple, limits):
"""
Test that EpisodeRoller is equivalent to a
BasicRoller when run on a single environment.
"""
def env_fn():
return SimpleEnv(3, (4, 5), 'uint8')
env = env_fn()
model = SimpleModel(env.action_space.low.shape,
stateful=stateful,
state_tuple=state_tuple)
basic_roller = BasicRoller(env, model, **limits)
expected = basic_roller.rollouts()
batched_env = batched_gym_env([env_fn], sync=True)
ep_roller = EpisodeRoller(batched_env, model, **limits)
actual = ep_roller.rollouts()
_compare_rollout_batch(actual, expected)
|
4f9632bd088a0be806ca9c4f51e3c5bc55431513
| 23,882 |
def _find_crate_root_src(srcs, file_names=["lib.rs"]):
"""Finds the source file for the crate root."""
if len(srcs) == 1:
return srcs[0]
for src in srcs:
if src.basename in file_names:
return src
fail("No %s source file found." % " or ".join(file_names), "srcs")
|
dd3488b49dc6c315c3d35ead75a83008e7bcd962
| 23,883 |
def decode_check(string):
"""Returns the base58 decoded value, verifying the checksum.
:param string: The data to decode, as a string.
"""
number = b58decode(string)
# Converting to bytes in order to verify the checksum
payload = number.to_bytes(sizeof(number), 'big')
if payload and sha256d(payload[:-4])[:4] == payload[-4:]:
return payload[:-4]
else:
return None
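# Illustrative sketch (not part of the original module) of the Base58Check checksum rule
# used above: the last 4 bytes must equal the first 4 bytes of a double SHA-256 of the
# rest. The b58decode/sizeof/sha256d helpers themselves are assumed to live elsewhere.
import hashlib

def _sha256d_demo(data: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

_payload = b"hello" + _sha256d_demo(b"hello")[:4]  # data followed by its 4-byte checksum
assert _sha256d_demo(_payload[:-4])[:4] == _payload[-4:]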
|
716c0a92be68feb2a97cacfd57940e9e43fa07d9
| 23,884 |
import math
def rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
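# Quick check of rotate() above: a quarter turn of (1, 0) about the origin should land
# (numerically) at (0, 1).
import math
qx, qy = rotate((0.0, 0.0), (1.0, 0.0), math.pi / 2)
print(round(qx, 6), round(qy, 6))  # 0.0 1.0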
|
9c542fd45b8b53bad61121429377298bd9d7fd08
| 23,885 |
def get_users_info(token, ids):
"""Return a response from vk api users.get
:param token: access token
:param ids: users ids
:return: dict with users info
"""
args = {
'user_ids': ids,
'fields': 'city,bdate,connections,photo_200',
'access_token': token,
'v': settings.api_v
}
return send_vk_request('users.get', **args)[0]
|
02246232df0f3eebc530e05e1734e10f6be5ed9e
| 23,886 |
def create_consistencygroup(ctxt,
host='test_host@fakedrv#fakepool',
name='test_cg',
description='this is a test cg',
status='available',
availability_zone='fake_az',
volume_type_id=None,
cgsnapshot_id=None,
source_cgid=None,
**kwargs):
"""Create a consistencygroup object in the DB."""
cg = objects.ConsistencyGroup(ctxt)
cg.host = host
cg.user_id = ctxt.user_id or 'fake_user_id'
cg.project_id = ctxt.project_id or 'fake_project_id'
cg.status = status
cg.name = name
cg.description = description
cg.availability_zone = availability_zone
if volume_type_id:
cg.volume_type_id = volume_type_id
cg.cgsnapshot_id = cgsnapshot_id
cg.source_cgid = source_cgid
for key in kwargs:
setattr(cg, key, kwargs[key])
cg.create()
return cg
|
a7548774690eccdc0c44231ae10dd345c9e84eb8
| 23,887 |
def ndigit(num):
"""Returns the number of digits in non-negative number num"""
    with nowarn(): return np.int32(np.floor(np.log10(np.maximum(1, num)))) + 1  # clamp num to >= 1 before the log so 0-9 count as one digit
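# Sanity check of the digit-count formula with plain numpy (nowarn() above is assumed to
# be a module-level warning-suppressing context manager and is not redefined here).
import numpy as np
for n in (0, 5, 9, 10, 99, 100):
    print(n, np.int32(np.floor(np.log10(np.maximum(1, n)))) + 1)  # 1, 1, 1, 2, 2, 3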
|
ac83e0b31ce9213646e856aa47fa8bfba7d74a86
| 23,888 |
def data_store_folder_unzip_public(request, pk, pathname):
"""
Public version of data_store_folder_unzip, incorporating path variables
:param request:
:param pk:
:param pathname:
:return HttpResponse:
"""
return data_store_folder_unzip(request, res_id=pk, zip_with_rel_path=pathname)
|
7866ed0539a00e16cbe0cbb2ab6902faebfd4434
| 23,889 |
def privateDataOffsetLengthTest10():
"""
Offset doesn't begin immediately after last table.
>>> doctestFunction1(testPrivateDataOffsetAndLength, privateDataOffsetLengthTest10())
(None, 'ERROR')
"""
header = defaultTestData(header=True)
header["privOffset"] = header["length"] + 4
header["privLength"] = 1
header["length"] += 2
return packTestHeader(header)
|
8744b97da80151a24c480ad29d2b1625f0c687f0
| 23,890 |
def meanncov(x, y=[], p=0, norm=True):
"""
Wrapper to multichannel case of new covariance *ncov*.
Args:
*x* : numpy.array
multidimensional data (channels, data points, trials).
*y* = [] : numpy.array
multidimensional data. If not given the autocovariance of *x*
will be calculated.
*p* = 0: int
window shift of input data. It can be negative as well.
*norm* = True: bool
normalization - if True the result is divided by length of *x*,
otherwise it is not.
Returns:
*mcov* : np.array
covariance matrix
"""
chn, N, trls = x.shape
for tr in range(trls):
if tr == 0:
if not len(y):
mcov = ncov(x[:, :, tr], p=p, norm=norm)
else:
mcov = ncov(x[:, :, tr], y[:, :, tr], p=p, norm=norm)
continue
if not len(y):
mcov += ncov(x[:, :, tr], p=p, norm=norm)
else:
mcov += ncov(x[:, :, tr], y[:, :, tr], p=p, norm=norm)
return mcov/trls
|
926bc64a0b7e15822f40f705ac3e770a57bb2e11
| 23,891 |
def nasnet_6a4032(**kwargs):
"""
NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
https://arxiv.org/abs/1707.07012.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_nasnet(
repeat=6,
penultimate_filters=4032,
init_block_channels=96,
final_pool_size=11,
extra_padding=False,
skip_reduction_layer_input=True,
in_size=(331, 331),
model_name="nasnet_6a4032",
**kwargs)
|
78f8862a69c12e8de85dc441e6b45e364d9b3385
| 23,892 |
def get_character_card(character_id, preston, access_token):
"""Get all the info for the character card.
Args:
character_id (int): ID of the character.
preston (preston): Preston object to make scope-required ESI calls.
access_token (str): Access token for the scope-required ESI calls.
Returns:
json: Character card information.
"""
# Get character.
characterPayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(str(character_id)))
if characterPayload.status_code != 200:
flash('There was an error ({}) when trying to retrieve character with ID {}'.format(str(characterPayload.status_code), str(character_id)), 'danger')
return None
characterJSON = characterPayload.json()
characterJSON['portrait'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/portrait/?datasource=tranquility".format(str(character_id))).json()
# Get corporation.
corporationPayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(str(characterJSON['corporation_id'])))
if corporationPayload.status_code != 200:
flash('There was an error ({}) when trying to retrieve corporation with ID {}'.format(str(corporationPayload.status_code), str(characterJSON['corporation_id'])), 'danger')
return None
characterJSON['corporation'] = corporationPayload.json()
characterJSON['corporation']['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
str(characterJSON['corporation_id']))).json()
# Get alliance.
if 'alliance_id' in characterJSON:
alliancePayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(str(characterJSON['alliance_id'])))
if alliancePayload.status_code != 200:
flash('There was an error ({}) when trying to retrieve alliance with ID {}'.format(str(alliancePayload.status_code), str(characterJSON['alliance_id'])), 'danger')
return None
characterJSON['alliance'] = alliancePayload.json()
characterJSON['alliance']['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
str(characterJSON['alliance_id']))).json()
# Get wallet.
walletIsk = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-wallet.read_character_wallet.v1'],
"https://esi.tech.ccp.is/latest/characters/{}/wallet/?datasource=tranquility&token={}".format(
str(character_id), access_token))
walletIskJSON = None
if walletIsk is not None:
walletIskJSON = walletIsk.json()
if walletIskJSON is not None and type(walletIskJSON) is not float:
flash('There was an error ({}) when trying to retrieve wallet for character.'.format(str(walletIsk.status_code)), 'danger')
return None
else:
characterJSON['wallet_isk'] = walletIskJSON
# Get skillpoints
characterSkills = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-skills.read_skills.v1'],
"https://esi.tech.ccp.is/latest/characters/{}/skills/?datasource=tranquility&token={}".format(
str(character_id), access_token))
characterSkillsJSON = None
if characterSkills is not None:
characterSkillsJSON = characterSkills.json()
if characterSkillsJSON is not None and 'error' in characterSkillsJSON:
flash('There was an error ({}) when trying to retrieve skills.'.format(str(characterSkills.status_code)), 'danger')
return None
else:
characterJSON['skills'] = characterSkillsJSON
return characterJSON
|
23f201833537a8a596e42acc41061a583ebead38
| 23,893 |
def expand_ALL_constant(model, fieldnames):
"""Replaces the constant ``__all__`` with all concrete fields of the model"""
if "__all__" in fieldnames:
concrete_fields = []
for f in model._meta.get_fields():
            if f.concrete:
                # relation fields (one-to-one, many-to-many) are included by name just like plain fields
                concrete_fields.append(f.name)
i = fieldnames.index("__all__")
return fieldnames[:i] + concrete_fields + fieldnames[i + 1 :]
return fieldnames
|
8c44c9b16fd93ca1c9a4efddd1ea85b44d34dba3
| 23,894 |
def server(user, password):
"""A shortcut to use MailServer.
SMTP:
server.send_mail([recipient,], mail)
POP3:
server.get_mail(which)
server.get_mails(subject, sender, after, before)
server.get_latest()
server.get_info()
server.stat()
Parse mail:
server.show(mail)
server.get_attachment(mail)
"""
return MailServer(user, password)
|
acd2e2b69b6fe22ac8ae40cbe1b9d51a750e4e46
| 23,895 |
def calculate_hessian(model, data, step_size):
"""
    Computes the mixed derivative using the finite differences method
:param model: The imported model module
:param data: The sampled data in structured form
:param step_size: The dx time step taken between each
:returns: mixed derivative
"""
hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))
for output_name in model.output_names:
hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)
mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)
mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)
hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative
hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T
return hessian
|
44ebed355e5db7991080e55f786850fb6b0e8908
| 23,897 |
def get_quarterly_income_statements(symbol):
"""
Returns quarterly IS for the past 5 yrs.
"""
df = query_av(function="INCOME_STATEMENT", symbol=symbol, datatype='quarterlyReports')
return df
|
2b9094da22782d02ff5c6e4f32930c0816d213a9
| 23,898 |
def BRepBlend_HCurve2dTool_IsPeriodic(*args):
"""
:param C:
:type C: Handle_Adaptor2d_HCurve2d &
:rtype: bool
"""
return _BRepBlend.BRepBlend_HCurve2dTool_IsPeriodic(*args)
|
707895ece8aa032bcd6b747c5be0313102758957
| 23,899 |
def HLRBRep_SurfaceTool_OffsetValue(*args):
"""
:param S:
:type S: Standard_Address
:rtype: float
"""
return _HLRBRep.HLRBRep_SurfaceTool_OffsetValue(*args)
|
e1bfa0f05cac1afd82b599f9dd75f2a860b82b3b
| 23,900 |
def update_cv_validation_info(test_validation_info, iteration_validation_info):
"""
Updates a dictionary with given values
"""
test_validation_info = test_validation_info or {}
for metric in iteration_validation_info:
test_validation_info.setdefault(metric, []).append(iteration_validation_info[metric])
return test_validation_info
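# Minimal usage sketch, assuming update_cv_validation_info above is in scope:
# per-iteration metrics accumulate into one list per metric.
info = update_cv_validation_info(None, {"acc": 0.9, "loss": 0.3})
info = update_cv_validation_info(info, {"acc": 0.8, "loss": 0.4})
print(info)  # {'acc': [0.9, 0.8], 'loss': [0.3, 0.4]}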
|
b2509026e968b1c428836c5313e9c5e824663d4f
| 23,901 |
from datetime import datetime
def epoch_to_datetime(epoch: str) -> datetime:
"""
    :param epoch: epoch timestamp in milliseconds, given as a string
    :return: the corresponding (local) datetime
    """
    return datetime.fromtimestamp(int(epoch) / 1000)
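# Usage sketch, assuming the epoch_to_datetime function above is in scope; the input is
# a millisecond epoch given as a string.
print(epoch_to_datetime("1609459200000"))  # 2021-01-01 00:00:00 in the local timezone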
|
5044c30d4acace727858075d66032e563a5ece0d
| 23,902 |
import torch
from torch import Tensor
from torch.nn.functional import softmax
def calculate_log_odds(
attr: Tensor,
k: float,
replacement_emb: Tensor,
model,
input_emb: Tensor,
attention_mask: Tensor,
prediction: Tensor,
) -> float:
"""
Log-odds scoring of an attribution
:param attr: Attribution scores for one sentence
:param k: top-k value (how many embeddings are replaced)
    :param replacement_emb: embedding for one word that should be used as replacement
    :param model: the model used to produce predictions for the replaced input
:param input_emb: Embedding of the sentence for which the attribution was computed
:param attention_mask: Original attention mask for the sentence
:param prediction: what model outputs for the input
"""
# get logits of masked prediction:
replaced_embed = replace_k_percent(attr, k, replacement_emb, input_emb)
new_pred = predict(model, replaced_embed, attention_mask)
# convert logits of (original) prediction and new_prediction to probabilities:
new_pred = softmax(new_pred, dim=1)
prediction = softmax(prediction, dim=1)
pred_i = torch.argmax(prediction).item()
return torch.log(new_pred[0, pred_i] / torch.max(prediction)).item()
|
d824e6705272eba3d24b061e2255700c05cb3f2a
| 23,903 |
import configparser
def parse_ini(path):
"""Simple ini as config parser returning the COHDA protocol."""
config = configparser.ConfigParser()
try:
config.read(path)
return True, ""
except (
configparser.NoSectionError,
configparser.DuplicateSectionError,
configparser.DuplicateOptionError,
configparser.NoOptionError,
configparser.InterpolationDepthError,
configparser.InterpolationMissingOptionError,
configparser.InterpolationSyntaxError,
configparser.InterpolationError,
configparser.MissingSectionHeaderError,
configparser.ParsingError,
) as err:
return False, slugify(err)
|
6d6f49f9d59c9ff72086cf9c6262e2bab829a369
| 23,905 |
import re
from inspect import signature
def number_of_positional_args(fn):
"""Return the number of positional arguments for a function, or None if the number is variable.
Looks inside any decorated functions."""
try:
if hasattr(fn, "__wrapped__"):
return number_of_positional_args(fn.__wrapped__)
if any(p.kind == p.VAR_POSITIONAL for p in signature(fn).parameters.values()):
return None
else:
return sum(
p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
for p in signature(fn).parameters.values()
)
except ValueError:
# signatures don't work for built-in operators, so try to extract from the docstring(!)
if hasattr(fn, "__doc__") and hasattr(fn, "__name__") and fn.__doc__ is not None:
specs = re.findall(r"{}\(.*?\)".format(re.escape(fn.__name__)), fn.__doc__)
specs = [re.sub(r", \*, .*\)", ")", re.sub(r"[[\]]", "", spec)) for spec in specs]
if any("*" in spec for spec in specs):
return None
elif specs:
return max(0 if spec.endswith("()") else spec.count(",") + 1 for spec in specs)
raise NotImplementedError("Bult-in operator {} not supported".format(fn))
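# Usage sketch, assuming number_of_positional_args above is in scope.
def _two_args(a, b): ...
def _var_args(*args): ...
print(number_of_positional_args(_two_args))  # 2
print(number_of_positional_args(_var_args))  # None (variable number of positionals)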
|
0cb50ea4a8f21d35711ff4848809f9db530d0099
| 23,906 |
def check_space(space):
""" Check the properties of an environment state or action space """
if isinstance(space, spaces.Box):
dim = space.shape
discrete = False
elif isinstance(space, spaces.Discrete):
dim = space.n
discrete = True
else:
raise NotImplementedError('This type of space is not supported')
return dim, discrete
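# Usage sketch, assuming check_space above is in scope and `spaces` is gym's
# `gym.spaces` module (having gym installed is an assumption here).
from gym import spaces
print(check_space(spaces.Discrete(4)))                          # (4, True)
print(check_space(spaces.Box(low=-1.0, high=1.0, shape=(3,))))  # ((3,), False)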
|
b5b186accb94afd1849312959f0c579b94f9300a
| 23,907 |
def finish_work(state):
"""Move all running nodes to done"""
state.progress.done = state.progress.done | state.progress.running
state.progress.running = set()
return state
|
649e14091737ef36db83b70591b09ee668a416f1
| 23,908 |
def show_submit_form(context, task, user, redirect, show_only_source=False):
"""Renders submit form for specified task"""
context["task"] = task
context["competition_ignored"] = user.is_competition_ignored(task.round.semester.competition)
context["constants"] = constants
context["redirect_to"] = redirect
context["show_only_source"] = show_only_source
if task.has_source:
context["source_form"] = SourceSubmitForm()
if task.has_description:
context["description_form"] = DescriptionSubmitForm()
if task.has_testablezip:
context["testablezip_form"] = TestableZipSubmitForm()
if task.has_text_submit:
context["text_submit_form"] = TextSubmitForm()
return context
|
7c09408f40da263d346d59ef49c85afb0d93e13a
| 23,909 |
def index_of_masked_word(sentence, bert):
"""Return index of the masked word in `sentence` using `bert`'s' tokenizer.
We use this function to calculate the linear distance between the target
and controller as BERT sees it.
Parameters
----------
sentence : str
Returns
-------
int
"""
tokens = bert.tokenize(sentence)
try:
return tokens.index(MASK)
except ValueError: # MASK not in sentence
return -1
|
b8a3999db7c7ca8379e60998bb9ad0617a878a6d
| 23,910 |
def vgg19(down=8, bn=False, o_cn=1, final='abs'):
"""VGG 19-layer model (configuration "E")
model pre-trained on ImageNet
"""
    model = VGG(make_layers(cfg['E'], batch_norm=bn), down=down, o_cn=o_cn, final=final)
model.load_state_dict(model_zoo.load_url(model_urls['vgg19']), strict=False)
return model
|
c20dcaed98a5dcfee1f78b3d7ec4f2a3d63cce1b
| 23,912 |
from vk_auth__requests_re import auth
import re
from bs4 import BeautifulSoup
def get_short_link_from_vk(login: str, password: str, link: str) -> str:
"""
    Function for obtaining a short link using the vk service.
"""
session, rs = auth(login, password)
    # This page is needed to obtain the hash for the shortener request
rs = session.get('https://vk.com/cc')
match = re.search(r"Shortener\.submitLink\('(.+)'\)", rs.text)
if match is None:
        raise Exception('Failed to obtain the hash for Shortener')
shortener_hash = match.group(1)
    # Data for the POST request that creates the short link
data = {
'act': 'shorten',
'link': link,
'al': '1',
'hash': shortener_hash,
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded',
}
rs = session.post('https://vk.com/cc', headers=headers, data=data)
print(rs)
root = BeautifulSoup(rs.content, 'lxml')
a_short_link = root.select_one('.shortened_link.shorten_list_header > a[href]')
return a_short_link['href']
|
aec82fc6ec9cce97a3a55926b21f5c3b35a56015
| 23,913 |
def student_list(request):
"""
List all students, or create a new student.
"""
if request.method == 'GET':
students = Student.objects.all()
serializer = StudentSerializer(students, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = StudentSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
2948d58c18b07d858e094eb4fbf0d5eac2fa1e6e
| 23,914 |
import time
def get_model_features(url, chromedriver):
"""For given model url, grab categories and tags for that model"""
try:
BROWSER.get(url)
time.sleep(5)
cats = BROWSER.find_elements_by_xpath("//section[@class='model-meta-row categories']//ul//a")
cats = [cat.text for cat in cats]
tags = BROWSER.find_elements_by_xpath("//section[@class='model-meta-row tags']//ul//a")
tags = [tag.text for tag in tags]
except:
print('Difficulty grabbing these features {}'.format(url))
print('Reload browser and try again')
cats, tags = None, None
return cats, tags
|
6fd902186c05027032977fb8ffec741559ae389f
| 23,915 |
def sent2labels(sent):
"""
Extracts gold labels for each sentence.
Input: sentence list
Output: list with labels list for each token in the sentence
"""
# gold labels at index 18
return [word[18] for word in sent]
|
11b4dc93c465d154e8bf8688a5b5c592b94e7265
| 23,916 |
def get_course_id_from_capa_module(capa_module):
"""
Extract a stringified course run key from a CAPA module (aka ProblemBlock).
This is a bit of a hack. Its intended use is to allow us to pass the course id
(if available) to `safe_exec`, enabling course-run-specific resource limits
in the safe execution environment (codejail).
Arguments:
capa_module (ProblemBlock|None)
Returns: str|None
The stringified course run key of the module.
If not available, fall back to None.
"""
if not capa_module:
return None
try:
return str(capa_module.scope_ids.usage_id.course_key)
except (AttributeError, TypeError):
# AttributeError:
# If the capa module lacks scope ids or has unexpected scope ids, we
# would rather fall back to `None` than let an AttributeError be raised
# here.
# TypeError:
# Old Mongo usage keys lack a 'run' specifier, and may
# raise a type error when we try to serialize them into a course
# run key. This is tolerable because such course runs are deprecated.
return None
|
dd76b1d6df12f6c7db0d095bb9a48940a850e5c7
| 23,917 |
def _gershgorin_circles_test(expr, var_to_idx):
"""Check convexity by computing Gershgorin circles without building
the coefficients matrix.
If the circles lie in the nonnegative (nonpositive) space, then the matrix
is positive (negative) definite.
Parameters
----------
expr : QuadraticExpression
the quadratic expression
var_to_idx : dict-like
map a var to a numerical index between 0 and n, where n is the number
of args of expr
Returns
-------
Convexity if the expression is Convex or Concave, None otherwise.
"""
n = expr.nargs()
row_circles = np.zeros(n)
diagonal = np.zeros(n)
for term in expr.terms:
i = var_to_idx[term.var1]
j = var_to_idx[term.var2]
if i == j:
diagonal[i] = term.coefficient
else:
coef = np.abs(term.coefficient / 2.0)
row_circles[j] += coef
row_circles[i] += coef
if np.all((diagonal - row_circles) >= 0):
return Convexity.Convex
if np.all((diagonal + row_circles) <= 0):
return Convexity.Concave
return None
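# Illustration of the same Gershgorin-circle argument on an explicit dense symmetric
# matrix (the expression-based code above never builds this matrix): if every diagonal
# entry dominates the sum of absolute off-diagonal entries in its row, all eigenvalues
# are nonnegative, hence the quadratic form is convex.
import numpy as np
A = np.array([[4.0, 1.0, 0.5],
              [1.0, 3.0, 0.5],
              [0.5, 0.5, 2.0]])
diagonal = np.diag(A)
row_circles = np.abs(A).sum(axis=1) - np.abs(diagonal)
print(np.all(diagonal - row_circles >= 0))  # True -> positive semidefinite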
|
282d873b4ef691a9402cc57f3a8181417b06ffc1
| 23,919 |
from typing import List
from faker import Faker
import random
from datetime import datetime
import pendulum
def make_papers(
*,
n_papers: int,
authors: AuthorList,
funders: FunderList,
publishers: PublisherList,
fields_of_study: List,
faker: Faker,
min_title_length: int = 2,
max_title_length: int = 10,
min_authors: int = 1,
max_authors: int = 10,
min_funders: int = 0,
max_funders: int = 3,
min_events: int = 0,
max_events: int = 100,
min_fields_of_study: int = 1,
max_fields_of_study: int = 20,
) -> PaperList:
"""Generate the list of ground truth papers.
:param n_papers: the number of papers to generate.
:param authors: the authors list.
:param funders: the funders list.
:param publishers: the publishers list.
:param fields_of_study: the fields of study list.
:param faker: the faker instance.
:param min_title_length: the min paper title length.
:param max_title_length: the max paper title length.
:param min_authors: the min number of authors for each paper.
:param max_authors: the max number of authors for each paper.
:param min_funders: the min number of funders for each paper.
:param max_funders: the max number of funders for each paper.
:param min_events: the min number of events per paper.
:param max_events: the max number of events per paper.
:param min_fields_of_study: the min fields of study per paper.
:param max_fields_of_study: the max fields of study per paper.
:return: the list of papers.
"""
papers = []
for i, _ in enumerate(range(n_papers)):
# Random title
n_words_ = random.randint(min_title_length, max_title_length)
title_ = faker.sentence(nb_words=n_words_)
# Random date
published_date_ = pendulum.from_format(faker.date(), "YYYY-MM-DD").date()
published_date_ = pendulum.date(year=published_date_.year, month=published_date_.month, day=published_date_.day)
# Output type
output_type_ = random.choice(OUTPUT_TYPES)
# Pick a random list of authors
n_authors_ = random.randint(min_authors, max_authors)
authors_ = random.sample(authors, n_authors_)
# Random funder
n_funders_ = random.randint(min_funders, max_funders)
if n_funders_ > 0:
funders_ = random.sample(funders, n_funders_)
else:
funders_ = []
# Random publisher
publisher_ = random.choice(publishers)
# Journal
journal_ = random.choice(publisher_.journals)
# Random DOI
doi_ = make_doi(publisher_.doi_prefix)
# Random events
n_events_ = random.randint(min_events, max_events)
events_ = []
today = datetime.now()
today_ts = int(today.timestamp())
start_date = datetime(today.year - 2, today.month, today.day)
start_ts = int(start_date.timestamp())
for _ in range(n_events_):
event_date_ = date_between_dates(start_ts=start_ts, end_ts=today_ts)
events_.append(Event(source=random.choice(EVENT_TYPES), event_date=event_date_))
# Fields of study
n_fos_ = random.randint(min_fields_of_study, max_fields_of_study)
level_0_index = 199
fields_of_study_ = [random.choice(fields_of_study[:level_0_index])]
fields_of_study_.extend(random.sample(fields_of_study, n_fos_))
# Open access status
is_free_to_read_at_publisher_ = True
if journal_.license is not None:
# Gold
license_ = journal_.license
else:
license_ = random.choice(LICENSES)
if license_ is None:
# Bronze: free to read on publisher website but no license
is_free_to_read_at_publisher_ = bool(random.getrandbits(1))
# Hybrid: license=True
# Green: in a 'repository'
is_in_institutional_repo_ = bool(random.getrandbits(1))
# Green not bronze: Not free to read at publisher but in a 'repository'
# Make paper
paper = Paper(
i,
doi=doi_,
title=title_,
published_date=published_date_,
output_type=output_type_,
authors=authors_,
funders=funders_,
journal=journal_,
publisher=publisher_,
events=events_,
fields_of_study=fields_of_study_,
license=license_,
is_free_to_read_at_publisher=is_free_to_read_at_publisher_,
is_in_institutional_repo=is_in_institutional_repo_,
)
papers.append(paper)
# Create paper citations
# Sort from oldest to newest
papers.sort(key=lambda p: p.published_date)
for i, paper in enumerate(papers):
# Create cited_by
n_papers_forwards = len(papers) - i
n_cited_by = random.randint(0, int(n_papers_forwards / 2))
paper.cited_by = random.sample(papers[i + 1 :], n_cited_by)
return papers
|
2422c176e3a170c646a33f4d372057451cc22775
| 23,920 |
def parse_cypher_file(path: str):
"""Returns a list of cypher queries in a file. Comments (starting with "//") will be filtered out and queries needs to be seperated by a semilicon
Arguments:
path {str} -- Path to the cypher file
Returns:
[str] -- List of queries
"""
def chop_comment(line):
# this function removes inline comments
comment_starter = "//"
possible_quotes = ["'", '"']
        # a little state machine with two state variables:
in_quote = False # whether we are in a quoted string right now
quoting_char = None
backslash_escape = False # true if we just saw a backslash
comment_init = ""
for i, ch in enumerate(line):
if not in_quote:
if ch == comment_starter[len(comment_init)]:
comment_init += ch
else:
# reset comment starter detection
comment_init = ""
if comment_starter == comment_init:
# a comment started, just return the non comment part of the line
comment_init = ""
return line[: i - (len(comment_starter) - 1)]
if ch in possible_quotes:
# quote is starting
comment_init = ""
quoting_char = ch
in_quote = True
else:
                if ch == quoting_char:
# quotes is ending
in_quote = False
quoting_char = None
return line
queries = []
with open(path) as f:
query = ""
for line in f:
line = chop_comment(line)
line = line.rstrip()
if line == "":
# empty line
continue
if not line.endswith("\n"):
query += "\n"
query += line
if line.endswith(";"):
query = query.strip(";")
queries.append(query)
query = ""
return queries
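# Usage sketch, assuming parse_cypher_file above is in scope: '//' comments are dropped
# and statements are split on the terminating ';'.
import tempfile
_cypher = """// create a node
CREATE (n:Person {name: 'Ann'});
MATCH (n:Person) RETURN n;
"""
with tempfile.NamedTemporaryFile("w", suffix=".cypher", delete=False) as _f:
    _f.write(_cypher)
    _path = _f.name
print(parse_cypher_file(_path))  # two queries, comment line removed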
|
00c4d357dee9e77160875fd8abb6c2b23d60a091
| 23,921 |
def del_method():
"""del: Cleanup an item on destroy."""
# use __del__ with caution
# it is difficult to know when the object will be actually removed
context = ""
class _Destroyable:
def __del__(self):
nonlocal context
context = "burn the lyrics"
item = _Destroyable()
del item
return context
|
727e9cee3048ee0c43111dc193746e44f55f5881
| 23,922 |
def sample_user(email="[email protected]", password="testpass"):
"""Create a sample User"""
return get_user_model().objects.create_user(email, password)
|
c0c3c94072e35d2e385cdfafc0cac0bf73eba6b5
| 23,923 |
from typing import Optional
def _precedence(match):
"""
in a dict spec, target-keys may match many
spec-keys (e.g. 1 will match int, M > 0, and 1);
therefore we need a precedence for which order to try
keys in; higher = later
"""
if type(match) in (Required, Optional):
match = match.key
if type(match) in (tuple, frozenset):
if not match:
return 0
return max([_precedence(item) for item in match])
if isinstance(match, type):
return 2
if hasattr(match, "glomit"):
return 1
return 0
|
9b82462632a5b087e4863144338d746c7687b06a
| 23,924 |
def experiment_set_reporting_data(connection, **kwargs):
"""
Get a snapshot of all experiment sets, their experiments, and files of
all of the above. Include uuid, accession, status, and md5sum (for files).
"""
check = CheckResult(connection, 'experiment_set_reporting_data')
check.status = 'IGNORE'
exp_sets = {}
search_query = '/search/?type=ExperimentSetReplicate&experimentset_type=replicate&sort=-date_created'
set_hits = ff_utils.search_metadata(search_query, key=connection.ff_keys, page_limit=20)
# run a second search for status=deleted and status=replaced
set_hits_del = ff_utils.search_metadata(search_query + '&status=deleted&status=replaced',
key=connection.ff_keys, page_limit=20)
set_hits.extend(set_hits_del)
for hit in set_hits:
add_to_report(hit, exp_sets)
check.full_output = exp_sets
return check
|
9d0e4279788522dc2d12b1693a8c12de06b894de
| 23,925 |
def display2D(data,show=None,xsize=None,ysize=None,pal=None):
"""display2D(data,show=None,xsize=None,ysize=None) - create color image object
from 2D list or array data, and the color palette is extracted from 'pal.dat',
if show=1 specified by default a 300x300 window shows the data image
xsize, ysize override the default 300 pixel setting
pal[768] - if specified the color table palette will be used
"""
if type(data) == type([]):
data = array(data)
w,h = data.shape[1],data.shape[0]
if pal == None:
file = "pal.dat"
if os.path.isfile(file) == 0:
CT = readCT()
pal = readPalette()
pixel = data2pixel(data,p=pal)
im = Image.new('RGB',(w,h))
for j in range(h):
for i in range(w):
ij = i+j*w
im.putpixel((i,j),pixel[ij])
if show != None:
if xsize == None:
xsize = 300
if ysize == None:
ysize = 300
resizeImage(im,xsize,ysize)
return im
|
59c8905e29ac680b01821413305b3705e93b4c70
| 23,926 |
def read_port_await_str(expected_response_str):
"""
It appears that the Shapeoko responds with the string "ok" (or an "err nn" string) when
a command is processed. Read the shapeoko_port and verify the response string in this routine.
    If an error occurs, a diagnostic message is printed.
:param expected_response_str: a string, typically "ok"
:return: True if "ok" received, otherwise False
"""
assert isinstance(expected_response_str, str)
global __shapeoko_port
response_str = __shapeoko_port.readline().strip()
if expected_response_str != response_str:
print "RESPONSE_STR_LEN({0}), RESPONSE_STR({1})".format(len(response_str), response_str)
return expected_response_str == response_str
|
8bbab3158a9afb0769746eeb590812b0b401d557
| 23,927 |
import inspect
import types
def doify(f, *, name=None, tock=0.0, **opts):
"""
Returns Doist compatible copy, g, of converted generator function f.
    Each invocation of doify(f) returns a unique copy of doified function f.
Imbues copy, g, of converted generator function, f, with attributes used by
Doist.enter() or DoDoer.enter().
Allows multiple instances of copy, g, of generator function, f, each with
unique attributes.
Usage:
def f():
pass
c = doify(f, name='c')
Parameters:
f is generator function
name is new function name for returned doified copy g. Default is to copy
f.__name__
tock is default tock attribute of doified copy g
opts is dictionary of remaining parameters that becomes .opts attribute
of doified copy g
Based on:
https://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance
"""
g = helping.copyfunc(f, name=name)
g.done = None # default done state
g.tock = tock # default tock attributes
g.opts = dict(opts) # default opts attribute
if inspect.ismethod(f): # f.__self__ instance method
g = types.MethodType(g, f.__self__) # make g a method of f.__self__ only
return g
|
b0f028c1311a47cb191306aa06985e70cdce2a64
| 23,928 |
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras Optimizer instance.
"""
# loss_scale_optimizer has a direct dependency of optimizer, import here
# rather than top to avoid the cyclic dependency.
from keras.mixed_precision import loss_scale_optimizer # pylint: disable=g-import-not-at-top
all_classes = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD,
'ftrl': ftrl.Ftrl,
'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
# LossScaleOptimizerV1 deserializes into LossScaleOptimizer, as
# LossScaleOptimizerV1 will be removed soon but deserializing it will
# still be supported.
'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
|
acf9290d84d99bd5d630b8ab38734ce245303f6e
| 23,929 |
def unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if isinstance(string, str):
string = string.encode('utf-8')
res = string.split(b'%')
res[0] = res[0]
for i in range(1, len(res)):
item = res[i]
try:
res[i] = bytes([int(item[:2], 16)]) + item[2:]
except ValueError:
res[i] = b'%' + item
return b''.join(res)
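# Usage sketch, assuming unquote_to_bytes above is in scope.
print(unquote_to_bytes("abc%20def"))  # b'abc def'
print(unquote_to_bytes("100%zz"))     # b'100%zz' (an invalid escape is left untouched)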
|
7148b06e10fc92864d9875c785a6397a0a4950aa
| 23,930 |
def validators(*chained_validators):
"""
Creates a validator chain from several validator functions.
    :param chained_validators: validator functions to combine
    :type chained_validators: callable
    :return: a validator that passes only if every chained validator passes
    :rtype: callable
"""
def validator_chain(match): # pylint:disable=missing-docstring
for chained_validator in chained_validators:
if not chained_validator(match):
return False
return True
return validator_chain
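# Usage sketch, assuming validators above is in scope: chain two simple checks.
_is_long = lambda m: len(m) > 3
_is_upper = lambda m: m.isupper()
_check = validators(_is_long, _is_upper)
print(_check("ABCD"), _check("ABC"))  # True False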
|
bba03b12c8de007882320e23377a32072734844a
| 23,931 |
def jacobian(vf):
"""Compute the jacobian of a vectorfield pointwise."""
vf0_dz, vf0_dy, vf0_dx = image_gradients(vf[..., 0:1])
vf1_dz, vf1_dy, vf1_dx = image_gradients(vf[..., 1:2])
vf2_dz, vf2_dy, vf2_dx = image_gradients(vf[..., 2:3])
r1 = tf.concat([vf0_dz[..., None], vf0_dy[..., None], vf0_dx[..., None]], axis=-1)
r2 = tf.concat([vf1_dz[..., None], vf1_dy[..., None], vf1_dx[..., None]], axis=-1)
r3 = tf.concat([vf2_dz[..., None], vf2_dy[..., None], vf2_dx[..., None]], axis=-1)
return tf.concat([r1, r2, r3], axis=-2)
|
c1415852f3bf919818b59e7f6524e9773021ec1f
| 23,932 |
def insert_ones(y, segment_end_ms, Ty, steps=50, background_len=10000.0):
"""Update the label vector y
The labels of the output steps strictly after the end of the segment
    should be set to 1. By strictly we mean that the label of segment_end_y should be 0, while the
    50 following labels should be ones.
Args:
y (ndarray): numpy array of shape (1, Ty), the labels of the training example
        segment_end_ms (int): the end time of the segment in ms
        Ty (int): the total number of output time steps
steps (int): number of output steps after to segment to put the label
background_len (float): number of time steps in the sample
Returns:
y (ndarray): updated labels
"""
# duration of the background (in terms of spectrogram time-steps)
segment_end_y = int(segment_end_ms * Ty / background_len)
for i in range(segment_end_y+1, segment_end_y+steps+1):
if i < Ty:
y[0, i] = 1.0
return y
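# Usage sketch, assuming insert_ones above is in scope: mark the 50 output steps that
# follow a segment ending at 5000 ms on a 1375-step label vector.
import numpy as np
Ty_demo = 1375
y_demo = insert_ones(np.zeros((1, Ty_demo)), segment_end_ms=5000, Ty=Ty_demo)
print(int(y_demo.sum()), int(np.argmax(y_demo[0])))  # 50 ones, starting at index 688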
|
f820a4d9d8720ee5eae916ffe865a822cea15135
| 23,933 |
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
"""
Return a complete dictionary mapping between stream parameter names
to their applicable renames, excluding parameters listed in
exclude_params.
If reverse is True, the mapping is from the renamed strings to the
original stream parameter names.
"""
filtered = [k for k in stream.param if k not in exclude_params]
mapping = {k:stream._rename.get(k,k) for k in filtered}
if reverse:
return {v:k for k,v in mapping.items()}
else:
return mapping
|
5a9c9ab80ad470c45d22f2e360cc53c979300825
| 23,934 |
def newton(f, df, x0, tolx, tolf, nmax):
"""
    Newton's method for finding a zero of a function.
    :param f: the function whose zero is to be computed
    :param df: the derivative of the function
    :param x0: the starting value
    :param tolx: the tolerance on the increment
    :param tolf: the tolerance on the function value
    :param nmax: the maximum number of iterations
    :return: (zero of the function, number of iterations, list of iterates)
    """
    # guard on |f'(x)|: bail out if the derivative is numerically zero
    def delta(value): return f(value) / df(value) if abs(df(value)) > np.spacing(1) else exit("Zero derivative")
def prossimax(value): return value - delta(value)
x = prossimax(x0)
fx = f(x)
it, xk = 1, [x]
while it < nmax and abs(fx) >= tolf and abs(delta(x)) >= tolx * abs(x):
x = prossimax(x)
xk.append(x)
fx = f(x)
it += 1
return x, it, xk
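# Usage sketch, assuming the newton function above is in scope: sqrt(2) as the zero of
# f(x) = x**2 - 2, starting from x0 = 2.
_f = lambda x: x ** 2 - 2
_df = lambda x: 2 * x
root, n_iter, iterates = newton(_f, _df, x0=2.0, tolx=1e-12, tolf=1e-12, nmax=50)
print(root, n_iter)  # ~1.4142135623730951 after a handful of iterations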
|
c362e2495034eaeb8a87e1d7363ec9f7a5b146a0
| 23,935 |
def preprocess(x, scale='std', clahe=True):
""" Preprocess the input features.
Args:
x:
batch of input images
clahe:
perform a contrast limited histogram equalization before scaling
scale:
'normalize' the data into a range of 0 and 1 or 'standardize' the
data to zero mean and standard deviation 1
Returns:
        The preprocessed input features, possibly reduced to a single channel
"""
if clahe is True:
x = np.array([np.expand_dims(rgb2clahe(img), 2) for img in x])
x = np.float32(x)
if scale is not None and scale.lower() in ['norm', 'normalize']:
x /= x.max()
elif scale is not None and scale.lower() in ['std', 'standardize']:
mean, std = x.mean(), x.std()
x = (x - mean) / (std + np.finfo(float).eps)
return x
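# Usage sketch, assuming preprocess above is in scope; with clahe=False the rgb2clahe
# helper is never touched, so this only needs numpy.
import numpy as np
_batch = np.random.randint(0, 256, size=(2, 32, 32, 3)).astype(np.uint8)
_out = preprocess(_batch, scale='std', clahe=False)
print(float(_out.mean()), float(_out.std()))  # ~0.0, ~1.0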
|
dae27316487bbcad3f94fbe538b9c90ffc3dd845
| 23,936 |
def endswith(s, tags):
"""除了模拟str.endswith方法,输入的tag也可以是可迭代对象
>>> endswith('a.dvi', ('.log', '.aux', '.dvi', 'busy'))
True
"""
if isinstance(tags, str):
return s.endswith(tags)
elif isinstance(tags, (list, tuple)):
for t in tags:
if s.endswith(t):
return True
else:
raise TypeError
return False
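# Usage sketch, assuming the endswith wrapper above is in scope.
print(endswith('a.dvi', '.dvi'))                    # True
print(endswith('a.dvi', ('.log', '.aux', 'busy')))  # False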
|
c30228f412552d08d09d9c50bdc20f4401477ba5
| 23,937 |
from typing import Sequence
def parse(data):
"""
Takes the byte string of an x509 certificate and returns a dict
containing the info in the cert
:param data:
The certificate byte string
:return:
A dict with the following keys:
         - version, serialNumber, algorithm, signature, issuer, notBefore, notAfter,
           subject, publicKeyAlgorithm, subjectPublicKey and, when present, subjectAltName
"""
structure = load(data)
if structure[0][0] != Sequence:
return None
body = structure[0][1]
if len(body) != 3:
return None
algo_oid_map = {
'1.2.840.113549.1.1.1': 'rsaEncryption',
'1.2.840.113549.1.1.2': 'md2WithRSAEncryption',
'1.2.840.113549.1.1.4': 'md5WithRSAEncryption',
'1.2.840.113549.1.1.5': 'sha1WithRSAEncryption',
'1.2.840.113549.1.1.11': 'sha256WithRSAEncryption',
'1.2.840.113549.1.1.12': 'sha384WithRSAEncryption',
'1.2.840.113549.1.1.13': 'sha512WithRSAEncryption'
}
cert_struct = body[0][1]
output = {}
output['algorithm'] = body[1][1][0][1]
if output['algorithm'] in algo_oid_map:
output['algorithm'] = algo_oid_map[output['algorithm']]
output['signature'] = body[2][1]
i = 0
# At least one CA cert on Windows was missing the version
if cert_struct[i][0] == 0x00:
output['version'] = cert_struct[i][1][0][1] + 1
i += 1
else:
output['version'] = 3
output['serialNumber'] = cert_struct[i][1]
i += 1
# The algorithm is repeated at cert_struct[i][1][0][1]
i += 1
output['issuer'] = parse_subject(cert_struct[i])
i += 1
output['notBefore'] = cert_struct[i][1][0][1]
output['notAfter'] = cert_struct[i][1][1][1]
i += 1
output['subject'] = parse_subject(cert_struct[i])
i += 1
output['publicKeyAlgorithm'] = cert_struct[i][1][0][1][0][1]
if output['publicKeyAlgorithm'] in algo_oid_map:
output['publicKeyAlgorithm'] = algo_oid_map[output['publicKeyAlgorithm']]
output['subjectPublicKey'] = cert_struct[i][1][1][1]
i += 1
for j in range(i, len(cert_struct)):
if cert_struct[j][0] == 0x01:
# Issuer unique identifier
pass
elif cert_struct[j][0] == 0x02:
# Subject unique identifier
pass
elif cert_struct[j][0] == 0x03:
output['subjectAltName'] = parse_subject_alt_name(cert_struct[j])
return output
|
17e8eceb8e15078d9d4a5a82a95d1f92be3e7819
| 23,938 |
def to_auto_diff(x):
"""
Transforms x into a automatically differentiated function (ADF),
unless it is already an ADF (or a subclass of it), in which case x is
returned unchanged.
Raises an exception unless 'x' belongs to some specific classes of
objects that are known not to depend on ADF objects (which then cannot be
considered as constants).
"""
if isinstance(x, ADF):
return x
#! In Python 2.6+, numbers.Number could be used instead, here:
if isinstance(x, CONSTANT_TYPES):
# constants have no derivatives to define:
return ADF(x, {}, {}, {})
raise NotImplementedError(
'Automatic differentiation not yet supported for {0:} objects'.format(
type(x))
)
|
9824759c5ee3bee8c00ed7218ae502099b162200
| 23,939 |
def voc_eval(class_recs: dict,
detect: dict,
iou_thresh: float = 0.5,
use_07_metric: bool = False):
"""
    result = voc_eval(class_recs, detect,
                      [iou_thresh],
                      [use_07_metric])
Top level function that does the PASCAL VOC evaluation.
    Please make sure that class_recs only contains annotations for a single class.
precision = tp / (tp + fp)
recall = tp / (tp + fn)
Args:
        class_recs: ground-truth recalls dict for a single class,
            class_recs[image_name] = {'bbox': np.ndarray}.
        detect: detection results,
            detect = {'image_ids': [], 'bbox': [], 'confidence': []}.
[iou_thresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
Returns:
a dict of result including true_positive_number, false_positive_number,
recall, precision and average_precision.
Raises:
TypeError: the data format is not np.ndarray.
"""
# format data
# class_rec data load
npos = 0
for imagename in class_recs.keys():
if not isinstance(class_recs[imagename]['bbox'], np.ndarray):
raise TypeError
detected_num = class_recs[imagename]['bbox'].shape[0]
npos += detected_num
class_recs[imagename]['det'] = [False] * detected_num
# detections data load
image_ids = detect['image_ids']
confidence = detect['confidence']
BB = detect['bbox']
if not isinstance(confidence, np.ndarray):
raise TypeError
if not isinstance(BB, np.ndarray):
raise TypeError
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
iou_max = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
overlaps = compute_overlaps(BBGT, bb)
iou_max = np.max(overlaps)
iou_max_index = np.argmax(overlaps)
if iou_max > iou_thresh:
if not R['det'][iou_max_index]:
tp[d] = 1.
R['det'][iou_max_index] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
true_positive_number = tp[-1]
false_positive_number = fp[-1]
recall = tp / float(npos)
# avoid divide by zero in case the first detection matches
# a difficult ground truth
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
average_precision = voc_ap(recall, precision, use_07_metric)
result = {}
result['true_positive_number'] = true_positive_number
result['false_positive_number'] = false_positive_number
result['recall'] = recall
result['precision'] = precision
result['average_precision'] = average_precision
return result
|
5842a31523aff85342c69300bcf1aeffa98e14e7
| 23,940 |
def birth() -> character.Character:
"""Gives birth to krydort."""
krydort = character.Character('Krydort Wolverry')
krydort.attributes.INT = 8
krydort.attributes.REF = 6
krydort.attributes.DEX = 6
krydort.attributes.BODY = 6
krydort.attributes.SPD = 4
krydort.attributes.EMP = 10
krydort.attributes.CRA = 7
krydort.attributes.WILL = 10
krydort.attributes.LUCK = 3
krydort.skills['INT'].Business = 4
krydort.skills['INT'].Education = 3
krydort.skills['INT'].CommonSpeech = 8
krydort.skills['INT'].ElderSpeech = 4
krydort.skills['INT'].Dwarven = 2
krydort.skills['INT'].Streetwise = 4
krydort.skills['REF'].DodgingEscape = 2
krydort.skills['REF'].SmallBlades = 4
krydort.skills['REF'].Swordsmanship = 2
krydort.skills['DEX'].Athletics = 2
krydort.skills['BODY'].Endurance = 2
krydort.skills['EMP'].Charisma = 6
krydort.skills['EMP'].Deceit = 4
krydort.skills['EMP'].Gambling = 2
krydort.skills['EMP'].GroomingAndStyle = 1
krydort.skills['EMP'].HumanPerception = 4
krydort.skills['EMP'].Persuasion = 6
krydort.skills['WILL'].Courage = 2
krydort.skills['WILL'].ResistCoercion = 5
return krydort
|
09b81202cd39907f98b7d267445b5a728562288d
| 23,941 |
def LinearCombinationOfContVars(doc:NexDoc, resultName, contVar1:NexVar, coeff1, contVar2:NexVar, coeff2):
"""Calculates a linear combination of two continuous variables."""
return NexRun("LinearCombinationOfContVars", locals())
|
86e6438b364b78a6cd6cea31593a1304f090df56
| 23,943 |
def handler404(request, exception): # pylint: disable=unused-argument
"""404: NOT FOUND ERROR handler"""
response = render_to_string(
"404.html", request=request, context=get_base_context(request)
)
return HttpResponseNotFound(response)
|
2e93076db5d3170a9c6d11b68facbabcddd7649f
| 23,944 |
def get_notification_user(operations_shift):
"""
Shift > Site > Project > Reports to
"""
if operations_shift.supervisor:
supervisor = get_employee_user_id(operations_shift.supervisor)
if supervisor != doc.owner:
return supervisor
operations_site = frappe.get_doc("Operations Site", operations_shift.site)
if operations_site.account_supervisor:
account_supervisor = get_employee_user_id(operations_site.account_supervisor)
if account_supervisor != doc.owner:
return account_supervisor
if operations_site.project:
project = frappe.get_doc("Project", operations_site.project)
if project.account_manager:
account_manager = get_employee_user_id(project.account_manager)
if account_manager != doc.owner:
return account_manager
|
9bd829f9ba3f1424221d9abab9775f2964ad48ec
| 23,945 |
from typing import Optional
from typing import List
from typing import Dict
from typing import Any
def list_groups(
namespace: str = "default", account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None
) -> List[Dict[str, Any]]:
"""List all QuickSight Groups.
Parameters
----------
namespace : str
The namespace. Currently, you should set this to default .
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
List[Dict[str, Any]]
Groups.
Examples
--------
>>> import awswrangler as wr
>>> groups = wr.quicksight.list_groups()
"""
return _list(
func_name="list_groups",
attr_name="GroupList",
account_id=account_id,
boto3_session=boto3_session,
Namespace=namespace,
)
|
d599262dc2045a781f840ee82f2d7324605f41ab
| 23,946 |
def big_diagram(BFIELD=1000,output='S0'):
"""
Main code to plot 'big' diagram with the following components:
- Theoretical absorption spectrum (top panel)
- Breit Rabi diagram for 0 to specified B-field (left)
- Energy levels for ground and excited states (bottom panel)
- Arrows for each transition, underneath the corresponding part of the spectrum
"""
##
## First part - calculate the absorption spectrum
##
# Define the detuning axis based on what the magnetic field strength is (in GHz)
# Values for BFIELD should be given in Gauss (1 G = 1e-4 T)
Dmax = max(6,5 + (BFIELD/1e4 * 3 * mu_B))
det_range = np.linspace(-Dmax,Dmax,int(3e4))
# Input parameters to calculate the spectrum
Bfield = BFIELD #alias
ELEM = 'Rb'
DLINE = 'D2'
RB85FRAC = 0.0 # Pure Rb87
LCELL = 1e-3
TEMP = 100 # C ~ 373K
# Voigt, horizontal polarisation
pol = [1,0,0]
p_dict = {'T':TEMP,'lcell':LCELL,'Elem':ELEM,'rb85frac':RB85FRAC,'Dline':DLINE,
'Bfield':BFIELD,'Btheta':90*np.pi/180,'Bphi':45*np.pi/180,'BoltzmannFactor':True}
[S0,S1,S2,S3] = get_spectra(det_range*1e3,pol,p_dict,outputs=['S0','S1','S2','S3'])
lenergy87, lstrength87, ltransno87, lgl87, lel87, \
renergy87, rstrength87, rtransno87, rgl87, rel87, \
zenergy87, zstrength87, ztransno87, zgl87, zel87 = calc_chi_energies([1], p_dict)
##
## Second part - calculate the Breit-Rabi diagram
##
BreitRabiVals = np.linspace(0,BFIELD,2000)
BreitRabiVals = np.append(BreitRabiVals,BreitRabiVals[-1])
Bstep = BreitRabiVals[1] - BreitRabiVals[0]
# Calculate Zeeman-shifted energy levels in parallel (uses multiprocessing module)
po = Pool()
res = po.map_async(eval_energies,(("Rb87","D2",BreitRabiVals[k],) for k in range(len(BreitRabiVals))))
energies = res.get()
gnd_energies = np.zeros((len(energies[0][0]),len(BreitRabiVals)))
exc_energies = np.zeros((len(energies[0][1]),len(BreitRabiVals)))
for jj, energyB in enumerate(energies):
gnd_energies[:,jj] = energyB[0]
exc_energies[:,jj] = energyB[1]
po.close()
po.join()
# Energies at largest B-field value
final_gnd_energies, final_exc_energies = eval_energies(("Rb87","D2",BreitRabiVals[-1]))
##
## Third part - calculate state decomposition
##
## Below values are for Rb-87. **Change for other atoms**.
I=3.0/2; L=0; S=1.0/2; J=1.0/2
output_states = AM_StateDecomp(I,L,S,J,atom='Rb',B=BFIELD/1e4)
print('\nState decomposition at B = ',BFIELD/1e4)
print(output_states)
##
## Fourth part - arrange the plot panels
##
fig = plt.figure("Big diagram at "+str(BFIELD/1e4)+' T',facecolor=None,figsize=(12,8))
plt.clf()
# Subplot arrangement
xBR = 2
xspec = 6
yBRe = 3
yBRg = 5
yspec = 4
xx = xBR + xspec
yy = yBRe + yBRg + yspec
ax_spec = plt.subplot2grid((yy,xx),(0,xBR),colspan=xspec,rowspan=yspec)
ax_excBR = plt.subplot2grid((yy,xx),(yspec,0),colspan=xBR,rowspan=yBRe)
ax_gndBR = plt.subplot2grid((yy,xx),(yspec+yBRe,0),colspan=xBR,rowspan=yBRg,sharex=ax_excBR)
ax_eLev = plt.subplot2grid((yy,xx),(yspec,xBR),colspan=xspec,rowspan=yBRe,sharex=ax_spec,sharey=ax_excBR)
ax_gLev = plt.subplot2grid((yy,xx),(yspec+yBRe,xBR),colspan=xspec,rowspan=yBRg,sharex=ax_spec,sharey=ax_gndBR)
# Turn off axes for eLev and gLev axes
for ax in [ax_eLev,ax_gLev]:
ax.set_frame_on(False)
for parameter in [ax.get_xticklabels(),ax.get_yticklabels(),ax.get_xticklines(),ax.get_yticklines()]:
plt.setp(parameter,visible=False)
plt.setp(ax_excBR.get_xticklabels(),visible=False)
ax_excBR.spines['right'].set_color('none')
ax_gndBR.spines['right'].set_color('none')
ax_gndBR.spines['top'].set_color('none')
ax_excBR.spines['top'].set_color('none')
ax_excBR.spines['bottom'].set_color('none')
ax_gndBR.xaxis.set_ticks_position('bottom')
ax_excBR.xaxis.set_ticks_position('none')
ax_excBR.tick_params(axis='y',left=True,right=False)
ax_gndBR.tick_params(axis='y',left=True,right=False)
# axis labels
ax_spec.set_xlabel('Detuning (GHz)')
ax_spec.xaxis.set_label_position('top')
ax_spec.tick_params(axis='x',bottom=True,top=True,labelbottom=False,labeltop=True)
ax_excBR.set_ylabel('$5P_{3/2}$ energy (GHz)')
ax_gndBR.set_ylabel('$5S_{1/2}$ energy (GHz)')
ax_gndBR.set_xlabel('Magnetic Field (T)')
fig.subplots_adjust(left=0.07,right=0.98,top=0.93,bottom=0.085,hspace=0.34,wspace=0)
#Ghost axes for actually plotting the Breit-Rabi data
eleft = ax_excBR.get_position().extents[0:2]
eright = ax_eLev.get_position().extents[2:]
gleft = ax_gndBR.get_position().extents[0:2]
gright = ax_gLev.get_position().extents[2:]
ax_e_bound = np.append(eleft,eright-eleft)
ax_g_bound = np.append(gleft,gright-gleft)
print('\nAxes bounds for B-R diagram:')
print(ax_e_bound)
print(ax_g_bound)
ax_e = fig.add_axes(ax_e_bound,frameon=False,facecolor=None)
ax_g = fig.add_axes(ax_g_bound,frameon=False,facecolor=None)
ax_g.set_xticks([])
ax_g.set_yticks([])
ax_e.set_xticks([])
ax_e.set_yticks([])
##
## Fifth part - Add the data to the figure
##
# Edit last magnetic field value
BreitRabiVals[-1] = BreitRabiVals[-2] * ((xspec + xBR) / xBR)
print('\nMagnetic field values (Breit-Rabi diagram)')
print(BreitRabiVals)
if output == 'S0':
ax_spec.set_ylabel('Transmission, $S_{0}$')
ax_spec.plot(det_range,S0.real,lw=2,color=d_black)
elif output == 'S1':
ax_spec.set_ylabel('$S_{1}$')
ax_spec.plot(det_range,S1.real,lw=2,color=d_black)
elif output == 'S2':
ax_spec.set_ylabel('$S_{2}$')
ax_spec.plot(det_range,S2.real,lw=2,color=d_black)
elif output == 'S3':
ax_spec.set_ylabel('$S_{3}$')
ax_spec.plot(det_range,S3.real,lw=2,color=d_black)
#convert to GHz from MHz
exc_energies /= 1e3
gnd_energies /= 1e3
final_exc_energies /= 1e3
final_gnd_energies /= 1e3
for energy in exc_energies[int(len(final_exc_energies)/3):]:
ax_e.plot(BreitRabiVals/1e4,energy,color=d_black,lw=1)
for energy in gnd_energies:
ax_g.plot(BreitRabiVals/1e4,energy,color=d_black,lw=1.5)
ax_excBR.set_xlim(0,(Bfield + 10*Bstep)/1e4)
for ax in [ax_g,ax_e]:
ax.set_ylim(ax.get_ylim()[0]*1.15,ax.get_ylim()[1]*1.15)
ax.set_xlim(BreitRabiVals[0]/1e4, BreitRabiVals[-1]/1e4)
ax_excBR.set_ylim(ax_e.get_ylim())
ax_gndBR.set_ylim(ax_g.get_ylim())
ax_spec.set_xlim(det_range[0],det_range[-1])
ax_spec.set_ylim(ax_spec.get_ylim()[0],1.01)
##
## Sixth part - Add arrows for each transition
##
print('Sigma minus transitions:')
print(sorted(lenergy87))
print('Sigma plus transitions:')
print(sorted(renergy87))
print('Pi transitions:')
print(sorted(zenergy87))
for energy in lenergy87:
ax_spec.axvline(energy/1e3,color=d_purple,lw=1.5)
for energy in renergy87:
ax_spec.axvline(energy/1e3,color=d_blue,lw=1.5)
for energy in zenergy87:
ax_spec.axvline(energy/1e3,color=d_olive,lw=1.5,linestyle='dashed')
# Coordinates for arrows - sigma minus transitions (purple)
xy1s = zip(lenergy87/1e3,lgl87/1e3)
xy2s = zip(lenergy87/1e3,lel87/1e3)
ecol = d_purple
fcol = 0.5 * (np.array(d_lightpurple) + np.array(d_purple))
alpha = 0.9
#styles = ['solid','solid','solid','solid','dashed','dashed','dashed','dashed']
for xy1,xy2,strength in zip(xy1s,xy2s,lstrength87):
#if (xy1[0] > 15) or (xy1[0]<-15):
coordsA = 'data'
coordsB = 'data'
con = ConnectionPatch(xy1,xy2,coordsA,coordsB,
arrowstyle="simple",shrinkB=0,
axesA=ax_gLev,axesB=ax_eLev,mutation_scale=25,
ec=ecol,fc=fcol,lw=1.25,alpha=alpha)
ax_gLev.add_artist(con)
# Coordinates for arrows - sigma plus transitions (blue)
xy1s = zip(renergy87/1e3,rgl87/1e3)
xy2s = zip(renergy87/1e3,rel87/1e3)
ecol = d_blue
fcol = 0.5 * (np.array(d_midblue) + np.array(d_blue))
alpha = 0.9
#styles = ['solid','solid','solid','solid','dashed','dashed','dashed','dashed']
for xy1,xy2,strength in zip(xy1s,xy2s,rstrength87):
#if (xy1[0] > 15) or (xy1[0]<-15):
coordsA = 'data'
coordsB = 'data'
con = ConnectionPatch(xy1,xy2,coordsA,coordsB,
arrowstyle="simple",shrinkB=0,
axesA=ax_gLev,axesB=ax_eLev,mutation_scale=25,
ec=ecol,fc=fcol,lw=1.25,alpha=alpha)
ax_gLev.add_artist(con)
# Coordinates for arrows - pi transitions (olive)
xy1s = zip(zenergy87/1e3,zgl87/1e3)
xy2s = zip(zenergy87/1e3,zel87/1e3)
ecol = d_darkolive
fcol = d_olive#darkyellow#olive #(0.16,0.85,0.16)
alpha = 0.6
#styles = ['solid','solid','solid','solid','dashed','dashed','dashed','dashed']
for xy1,xy2,strength in zip(xy1s,xy2s,zstrength87):
#if (xy1[0] < 15) and (xy1[0]>-15):
coordsA = 'data'
coordsB = 'data'
con = ConnectionPatch(xy1,xy2,coordsA,coordsB,
arrowstyle="simple",shrinkB=0,
axesA=ax_gLev,axesB=ax_eLev,mutation_scale=25,
ec=ecol,fc=fcol,lw=1.25,alpha=alpha)
ax_gLev.add_artist(con)
# Add B-field info to plot - top left
fig.text(0.1,0.78-0.03,'L = '+str(LCELL*1e3)+' mm',size=18,ha='center')
fig.text(0.1,0.82-0.03,r'T = '+str(TEMP)+' $^{\circ}$C',size=18,ha='center')
fig.text(0.1,0.86-0.03,'B = '+str(Bfield/1e4)+' T',size=18,ha='center')
fig.text(0.1,0.90-0.03,str(DLINE)+' Line',size=18,ha='center')
fig.text(0.1,0.94-0.03,'$^{87}$Rb',size=18,ha='center')
##
## Finally - show the plot and save the figure
##
ax_spec.set_xlim(-Dmax,Dmax)
    # fig.savefig('./BR_plot_'+str(Bfield)+str(output)+'.pdf',dpi=300)
    # fig.savefig('./BR_plot_'+str(Bfield)+str(output)+'.png',dpi=300)
plt.show()
print('--- End of calculations ---')
return fig
|
ad72a778b067fa09d42a514e85ab499b52f899c4
| 23,948 |
def referenced_fmr(X=None, Y=None, Z=None,
delta_x_idx:{"type": "int",
"min":0, "max": 1,
"hint": "Distance of the background signal (in x-index units)"}=0,):
"""
    For each x-index i, divide Z[i, :] by Z[i + delta_x_idx, :] (background referencing); X and Y are truncated to the same range.
"""
slc_x_val = slice(0, X.shape[0] - delta_x_idx)
slc_x_delta = slice(delta_x_idx, X.shape[0])
return X[slc_x_val, :], Y[slc_x_val, :], Z[slc_x_val,:]/Z[slc_x_delta,:]
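# Minimal usage sketch (not part of the original record): synthetic 2-D numpy
# arrays stand in for the field (X), frequency (Y) and signal (Z) maps.
import numpy as np
X = np.tile(np.arange(5.0)[:, None], (1, 3))   # x-axis values, shape (5, 3)
Y = np.tile(np.arange(3.0)[None, :], (5, 1))   # y-axis values, shape (5, 3)
Z = np.random.rand(5, 3) + 1.0                 # measured signal, kept positive
Xr, Yr, Zr = referenced_fmr(X=X, Y=Y, Z=Z, delta_x_idx=2)
print(Zr.shape)   # (3, 3): each row referenced to the row two x-steps further on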
|
fb0768d2de813b5fde3c167fcb0d738b4476e802
| 23,949 |
from sets_categories_data import VEGAN, VEGETARIAN, PALEO, KETO, OMNIVORE
def categorize_dish(dish_name, dish_ingredients):
"""
:param dish_name: str
:param dish_ingredients: list
:return: str "dish name: CATEGORY"
This function should return a string with the `dish name: <CATEGORY>` (which meal category the dish belongs to).
All dishes will "fit" into one of the categories imported from `sets_categories_data.py`
(VEGAN, VEGETARIAN, PALEO, KETO, or OMNIVORE).
"""
    ingredients = set(dish_ingredients)
    if VEGAN >= ingredients:
        category = 'VEGAN'
    elif VEGETARIAN >= ingredients:
        category = 'VEGETARIAN'
    elif PALEO >= ingredients:
        category = 'PALEO'
    elif KETO >= ingredients:
        category = 'KETO'
    else:
        category = 'OMNIVORE'
return f"{dish_name}: {category}"
|
dfe86e53a1bf013f2633c64972ee0415774d24cd
| 23,950 |
import numpy as np
def generate_points_realistic(N=100, distortion_param=0, rng=None):
"""Generates two poses and the corresponding scene points and image points."""
# Check if a seed is used (for unittests)
if not rng:
rng = np.random.default_rng()
# Relative translation
t = 2 * rng.random((3, 1)) - 1
# Make sure the baseline is okay
t = t / np.linalg.norm(t)
# Calibration matrix
f = rng.random() * 200 + 200
K = np.diag([f, f, 1.0])
Kinv = np.diag([1.0 / f, 1.0 / f, 1.0])
R1, _ = np.linalg.qr(rng.random((3, 3)))
R2, _ = np.linalg.qr(rng.random((3, 3)))
R = R2 @ R1.T
P1 = K @ np.hstack((R1, np.zeros((3, 1))))
P2 = K @ np.hstack((R2, t))
# Fundamental matrix
F = Kinv.T @ skew(t) @ R @ Kinv
# Generate points with y-coordinate in front of scene
X = np.vstack((
6 * rng.random((1, N)) - 3,
5 * rng.random((1, N)) + 3,
6 * rng.random((1, N)) - 3,
np.ones((1, N))))
# Generate point correspondences (pinhole)
x1 = pflat(P1 @ X)
x2 = pflat(P2 @ X)
# Add radial distortion (if desired)
x1u = x1
x2u = x2
if distortion_param < 0:
x1 = radialdistort(x1, distortion_param)
x2 = radialdistort(x2, distortion_param)
return R1, R2, f, F, x1, x2, R, t, x1u, x2u
|
8ca04480f12f645c62b1b6c65dea49e691c6e35f
| 23,952 |
import pandas as pd
def search_covid_results(patient_id: str,
covid_df: pd.DataFrame):
"""
Given a patient ID and a dataframe of COVID-19 PCR results, return whether a patient had
a positive result at any point and the date of their first positive. If no positives but
negative results exist, return "N" for negative, otherwise "U" for unknown.
Parameters
----------
patient_id: str
Patient ID
covid_df: Pandas.DataFrame
COVID-19 PCR results
Returns
-------
str, str or None
"""
pt_status = covid_df[covid_df.PATIENT_ID == patient_id].sort_values("collection_datetime", ascending=True).copy()
positives = pt_status[pt_status.TEXT == "Positive"].copy()
for x in ["collection_datetime", "test_datetime"]:
positives[x] = positives[x].dt.strftime("%Y-%m-%dT%H:%M:%SZ")
if pt_status.shape[0] == 0:
return "U", None
if positives.shape[0] != 0:
first_positive = positives.iloc[0]
if pd.isnull(first_positive.collection_datetime):
if pd.isnull(first_positive.test_datetime):
return "P", None
return "P", first_positive.test_datetime
return "P", first_positive.collection_datetime
negatives = pt_status[pt_status.TEXT == "Negative"]
if negatives.shape[0] != 0:
return "N", None
return "U", None
|
af4a453fb3b3416f7052e08622c1ce28777871e9
| 23,954 |
from pathlib import Path
def str_is_path(p: str):
"""Detects if the variable contains absolute paths. If so, we distinguish paths that exist and paths that are images.
Args:
p: the Path
Returns:
True is is an absolute path
"""
try:
path = Path(p)
        return path.is_absolute()
except TypeError:
return False
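# Usage sketch (absolute-path semantics shown for POSIX systems):
print(str_is_path("/usr/local/bin"))   # True
print(str_is_path("docs/readme.md"))   # False
print(str_is_path(42))                 # False - the TypeError is caught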
|
79234f0bfe9a34d3335b2be2306f8ef3d8e90eed
| 23,956 |
def PLAY(command: Command) -> Command:
"""
Moves clip from background to foreground and starts playing it.
If a transition (see LOADBG) is prepared, it will be executed.
"""
return command
|
eebafb654cf6daef130c928d9b9ce003cb370a4b
| 23,957 |
from typing import Any
import json
def deserialize(data: str) -> dict:
"""
Given a string, deserialize it from JSON.
"""
if data is None:
return {}
def fix(jd: Any) -> Any:
if type(jd) == dict:
# Fix each element in the dictionary.
for key in jd:
jd[key] = fix(jd[key])
return jd
if type(jd) == list:
# Could be serialized by us, could be a normal list.
if len(jd) >= 1 and jd[0] == '__bytes__':
# This is a serialized bytestring
return bytes(jd[1:])
# Possibly one of these is a dictionary/list/serialized.
for i in range(len(jd)):
jd[i] = fix(jd[i])
return jd
# Normal value, its deserialized version is itself.
return jd
return fix(json.loads(data))
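# Usage sketch: a JSON payload that uses the '__bytes__' list convention handled above.
payload = '{"name": "blob", "data": ["__bytes__", 1, 2, 3]}'
print(deserialize(payload))   # {'name': 'blob', 'data': b'\x01\x02\x03'}
print(deserialize(None))      # {}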
|
ce7282fc99985a348ae9cf2748132e0f53993b51
| 23,958 |
def seq_windows_df(
df,
target=None,
start_index=0,
end_index=None,
history_size=1,
target_size=1,
step=1,
single_step=False,
):
"""
create sliding window tuples for training nns on multivar timeseries
"""
data = []
labels = []
start_index = start_index + history_size
if target is None:
target = df
if end_index is None:
end_index = df.shape[0] - target_size
for i in range(start_index, end_index):
indices = range(i - history_size, i, step)
X = df.iloc[indices]
data.append(X)
if single_step:
label = target[i + target_size]
else:
label = target[i: i + target_size]
labels.append(label)
return data, labels
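# Usage sketch (illustrative only): two-step windows over a tiny two-feature series.
import pandas as pd
series = pd.DataFrame({"temp": [20, 21, 22, 23, 24, 25], "load": [5, 6, 7, 8, 9, 10]})
windows, labels = seq_windows_df(series, history_size=2, target_size=1)
print(len(windows))        # 3
print(windows[0].values)   # rows 0-1 of the series
print(labels[0].values)    # row 2, the step being predicted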
|
f95511c761e2da9ed493a71e9e849038d5fbba8b
| 23,959 |
from typing import List
from typing import Callable
def get_cachable_provider(
cachables: List[Cachable] = [Collection1(), Collection2()]
) -> Callable[[], List[Cachable]]:
"""
Returns a cachable_provider.
"""
return lambda: cachables
|
5c2e20b35fe472027ba6015ba7e3b896125ca6fd
| 23,960 |
def getQuality(component, propertyName):
# type: (JComponent, String) -> int
"""Returns the data quality for the property of the given component
as an integer.
This function can be used to check the quality of a Tag binding on a
component in the middle of the script so that alternative actions
can be taken in the event of device disconnections.
Args:
component: The component whose property is being checked.
propertyName: The name of the property as a string value.
Returns:
The data quality of the given property as an integer.
"""
print(component, propertyName)
return 192
|
a17f8fe581941c67935e0ce61d82ba7a407f77aa
| 23,961 |
def norm1(x):
"""Normalize to the unit sphere."""
return x / x.square().sum(axis=-1, keepdims=True).sqrt()
|
f65ce24fee174b579243c627e2b5526a83db973e
| 23,962 |
def markup_sentence(s, modifiers, targets, prune_inactive=True):
""" Function which executes all markup steps at once
"""
markup = pyConText.ConTextMarkup()
markup.setRawText(s)
markup.cleanText()
markup.markItems(modifiers, mode="modifier")
markup.markItems(targets, mode="target")
markup.pruneMarks()
markup.dropMarks('Exclusion')
# apply modifiers to any targets within the modifiers scope
markup.applyModifiers()
markup.pruneSelfModifyingRelationships()
if prune_inactive:
markup.dropInactiveModifiers()
return markup
|
397a5b2fef35ab3796917bede355e739868ad54e
| 23,963 |
def get_stream_info(stream_id):
"""
Uses the `/stream/info` endpoint taking the stream_id as a parameter.
e.g. stream_id="e83a515e-fe69-4b19-afba-20f30d56b719"
"""
endpoint = KICKFLIP_API_URL + '/stream/info/'
payload = {'stream_id': stream_id}
response = kickflip_session.post(endpoint, payload)
return response.json()
|
f590a3b3eb6764e01168fb2c93c9c566acd452d1
| 23,964 |
import numpy as np
def invchisquared_sample(df, scale, size):
"""Return `size` samples from the inverse-chi-squared distribution."""
# Parametrize inverse-gamma
alpha = df/2
beta = df*scale/2.
# Parametrize gamma
k = alpha
theta = 1./beta
gamma_samples = np.random.gamma(k, theta, size)
return 1./gamma_samples
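# Usage sketch: for df > 2 the mean of a scaled inverse-chi-squared is df*scale/(df-2).
import numpy as np
np.random.seed(0)
samples = invchisquared_sample(df=5, scale=2.0, size=100_000)
print(samples.mean())   # roughly 10/3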
|
b8151f69efecd62c632e53ec62890223098fc78c
| 23,965 |
import pandas as pd
import scipy.io as sio
def get_data_from_matlab(file_url, index, columns, data):
"""Description:*
This function takes a Matlab file .mat and extract some
information to a pandas data frame. The structure of the mat
file must be known, as the loadmat function used returns a
dictionary of arrays and they must be called by the key name
Args:
        file_url: the location of the .mat file
index: the key for the array of string date-like to be used as index
for the dataframe
columns: the key for the array of data to be used as columns in
the dataframe
data: the key for the array to be used as data in the dataframe
Returns:
Pandas dataframe
"""
# load mat file to dictionary
mat = sio.loadmat(file_url)
# define data to import, columns names and index
cl = mat[data]
stocks = mat[columns]
dates = mat[index]
# extract the ticket to be used as columns name in dataframe
    # to-do: use a list comprehension here
columns = []
for each_item in stocks:
for inside_item in each_item:
for ticket in inside_item:
columns.append(ticket)
# extract string ins date array and convert to datetimeindex
    # to-do: use a list comprehension here
df_dates =[]
for each_item in dates:
for inside_item in each_item:
df_dates.append(inside_item)
df_dates = pd.Series([pd.to_datetime(date, format= '%Y%m%d') for date in df_dates], name='date')
# construct the final dataframe
data = pd.DataFrame(cl, columns=columns, index=df_dates)
return data
|
e7e28ed0a0a5f9d35e195df847218b4bea110840
| 23,966 |
def get_loop_end(header: bytes) -> int:
"""Return loop end position."""
assert isinstance(value := _unpack(header, "LOOP_END"), int), type(value)
assert 0 < value < 65535, value
return value
|
b04b9c109ce1936ca87cdbd61615039e367176fb
| 23,967 |
import warnings
def measuresegment(waveform, Naverage, minstrhandle, read_ch, mV_range=2000, process=True, device_parameters=None):
"""Wrapper to identify measurement instrument and run appropriate acquisition function.
Supported instruments: m4i digitizer, ZI UHF-LI
Args:
waveform (dict): waveform specification
Naverage (int): number of averages to perform
minstrhandle (str or Instrument): handle to acquisition device
read_ch (list): channels to read from the instrument
mV_range (float): range for input
process (bool): If True, process the segment data from scope reader
device_parameters (dict): dictionary passed as keyword parameters to the measurement methods
Returns:
data (numpy array): recorded and processed data
"""
if device_parameters is None:
device_parameters = {}
is_m4i = _is_m4i(minstrhandle)
is_uhfli = _is_measurement_device(minstrhandle, qcodes.instrument_drivers.ZI.ZIUHFLI.ZIUHFLI)
is_scope_reader = _is_measurement_device(minstrhandle, AcquisitionScopeInterface)
is_simulator = _is_measurement_device(minstrhandle, SimulationDigitizer)
measure_instrument = get_instrument(minstrhandle)
if is_m4i:
data = measuresegment_m4i(minstrhandle, waveform, read_ch, mV_range, Naverage, process=process,
**device_parameters)
elif is_uhfli:
data = measure_segment_uhfli(minstrhandle, waveform, read_ch, Naverage, **device_parameters)
elif is_scope_reader:
data = measure_segment_scope_reader(minstrhandle, waveform, Naverage, process=process, **device_parameters)
elif is_simulator:
data = measure_instrument.measuresegment(waveform, channels=read_ch)
elif minstrhandle == 'dummy':
# for testing purposes
data = np.random.rand(100, )
else:
raise Exception(f'Unrecognized fast readout instrument {minstrhandle}')
if np.array(data).size == 0:
warnings.warn('measuresegment: received empty data array')
return data
|
d66b8fdb438295e97b6a09b199fff501dab7c2e5
| 23,969 |
def grid_definition_proj():
"""Custom grid definition using a proj string."""
return {
"shape": (1, 1),
"bounds": (-4000000.0, -4000000.0, 4000000.0, 4000000.0),
"is_global": False,
"proj": example_proj,
}
|
b24e7b9d93073cded80b3e0af4b0b35dbc2439e4
| 23,970 |
def _compare_lines(line1, line2, tol=1e-14):
"""
Parameters
----------
line1: list of str
line2: list of str
Returns
-------
bool
"""
if len(line1) != len(line2):
return False
for i, a in enumerate(line1):
b = line2[i]
if type(a) not in {int, float}:
if a != b:
return False
elif type(a) is int and type(b) is int:
if a != b:
return False
elif type(a) in {int, float} and type(b) in {int, float}:
if abs(a - b) > tol:
return False
else:
if a != b:
return False
return True
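# Usage sketch: numeric entries compare within the tolerance, everything else exactly.
print(_compare_lines([1, 2.00000001, "x"], [1, 2.0, "x"], tol=1e-6))   # True
print(_compare_lines([1, 2.1, "x"], [1, 2.0, "x"]))                    # False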
|
ec45a9fea4dfea3988afaa8947d35e0cc5fb27ca
| 23,971 |
def helping_func(self, driver, value):
"""Helper function for testing method composition.
"""
return value + 1
|
2a204814213707a255b0b0e57e4d5ca23389045d
| 23,972 |
def add_languages_modify(schema, fields, locales=None):
"""Adds localized field keys to the given schema"""
if locales is None:
locales = get_locales()
ignore_missing = toolkit.get_validator('ignore_missing')
convert_to_extras = toolkit.get_converter('convert_to_extras')
for locale in locales:
for field in fields:
schema.update({"%s_%s" % (field, locale): [ignore_missing, str, convert_to_extras]})
return schema
|
3c2f0e001e5d6c8df90fc67a472a854fe1d3913f
| 23,973 |
import typing as tp
T = tp.TypeVar("T")
def from_iterable(
iterable: tp.Union[tp.Iterable[T], pypeln_utils.Undefined] = pypeln_utils.UNDEFINED,
use_thread: bool = True,
) -> tp.Union[Stage[T], pypeln_utils.Partial[Stage[T]]]:
"""
Creates a stage from an iterable.
Arguments:
iterable: A source Iterable.
use_thread: If set to `True` (default) it will use a thread instead of a process to consume the iterable. Threads start faster and use thread memory to the iterable is not serialized, however, if the iterable is going to perform slow computations it better to use a process.
Returns:
Returns a `Stage` if the `iterable` parameters is given, else it returns a `Partial`.
"""
if isinstance(iterable, pypeln_utils.Undefined):
return pypeln_utils.Partial(
lambda iterable: from_iterable(iterable, use_thread=use_thread)
)
return Stage(
process_fn=FromIterable(iterable),
workers=1,
maxsize=0,
timeout=0,
total_sources=1,
dependencies=[],
on_start=None,
on_done=None,
use_threads=use_thread,
f_args=[],
)
|
cc07cf1c74fd9ce65af112753152d3b508575ff0
| 23,974 |
def get_consumer_secret():
"""This is entirely questionable. See settings.py"""
consumer_secret = None
try:
loc = "%s/consumer_secret.txt" % settings.TWITTER_CONSUMER_URL
url = urllib2.urlopen(loc)
consumer_secret = url.read().rstrip()
except (urllib2.HTTPError, IOError), e:
print "Unable to obtain consumer_secret from %s: %s" % (loc, e)
return consumer_secret
|
21db497551396920fe87eb02ddb3267d8d4f9d7f
| 23,975 |
def preprocess(src, cutoff, shape=(240, 240)):
"""Pre-processes the image"""
# Resizing the image, for computational reasons, else the algorithm will take too much time
dst = cv2.resize(src, shape)
# (automated) Canny Edge Detection
dst = aced.detect(dst)
# Binary or Adaptive thresholding
dst = aced.thresh(dst, cutoff, method='bin')
return dst
|
76755a1a2698583f2afc8c05043acef072d6ad67
| 23,976 |
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is not None
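# Usage sketch: naive versus timezone-aware datetimes.
from datetime import datetime, timezone
print(is_aware(datetime(2021, 1, 1)))                       # False (naive)
print(is_aware(datetime(2021, 1, 1, tzinfo=timezone.utc)))  # True (aware)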
|
82d1199c24ced594d86945b1b76fa595a30083c1
| 23,977 |
import fsspec
def check_manifest(of: fsspec.core.OpenFile, manifest: str) -> bool:
"""
    Check to see if a given file's path exists in a manifest file.
    Parameters
    ==========
    of: fsspec.core.OpenFile
        The open file whose path should be checked.
manifest: str
The path to a manifest file.
Returns
=======
True if the file is *not* in the manifest, False if it is.
"""
# Check if the file actually exists. If not, return true.
mf = fsspec.open(manifest, "r")
if not mf.fs.exists(manifest):
return True
# If the file exists, check if the file exists in it.
with mf as f:
content = set(f.read().split("\n"))
return of.path not in content
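# Hedged usage sketch on the local filesystem; the /tmp paths are illustrative only.
with open('/tmp/manifest.txt', 'w') as f:
    f.write('/tmp/a.txt\n/tmp/b.txt')
print(check_manifest(fsspec.open('/tmp/a.txt'), '/tmp/manifest.txt'))   # False - already listed
print(check_manifest(fsspec.open('/tmp/c.txt'), '/tmp/manifest.txt'))   # True - not listed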
|
3e44cf192081d648698d5049bf39b3b0aa86ec34
| 23,978 |
def loadExpObjectFast(filename):
"""loads a CiPdeN object from a JSON file
    Ignores generation data, except the first and the last.
Parameters
----------
filename : str
includes path and filename
Returns
-------
dict
returns a dict if it worked,
else return None
"""
try:
with open(filename, 'rb') as f:
result = bigjson.load(f)
obj_dict = dict()
obj_dict["pde"] = result["pde"]
obj_dict["kernel_type"] = result["kernel_type"]
obj_dict["opt_algo"] = result["opt_algo"]
obj_dict["exec_time"] = result["exec_time"]
obj_dict["mem_consumption"] = result["mem_consumption"]
obj_dict["normL2"] = result["normL2"]
obj_dict["sol_kernel"] = np.array(result["sol_kernel"].to_python())
return obj_dict
except Exception as e:
print(str(e))
return None
|
abd513ee1b699c6dab882ab8d223243cdf3f7802
| 23,979 |
def Vij_beam_correct(j, Vij, centre=None):
"""Corrects Vij for the beam amplitude.
This is required when beam correction has not been done during calibration.
Assumes identical beam patterns.
Assumes calibrator source is at centre of image"""
my_shape = Vij[0, 0, :, :].shape
if centre is None:
        centre = (my_shape[0] // 2, my_shape[1] // 2)  # Cal source at image centre
logger.warning('Using centre of image as calibrator location')
temp = beam_tools.makeUnpolInstrumentalResponse(j[:, :, centre[0], centre[1]], j[:, :, centre[0], centre[1]])
XX = temp[0, 0]
YY = temp[1, 1]
# XY = temp[0, 1]
# YX = temp[1, 0]
correction = np.array([[XX, XX ** 0.5 * YY ** 0.5], [XX ** 0.5 * YY ** 0.5, YY]])
# correction=np.array([[XX,XY],[YX,YY]])
# correction=np.array([[XX,1],[1,YY]])
logger.warning('Calibration correction factors: XX=%s, XY=%s, YX=%s, YY=%s' % (correction[0, 0],
correction[0, 1],
correction[1, 0],
correction[1, 1]))
# Tile 2x2 correction matrix apply to Vij
Vij_corrected = Vij * np.tile(correction[:, :, np.newaxis, np.newaxis], (my_shape[0], my_shape[1]))
return Vij_corrected
|
4dafe4c9aaa092b03618fd3eda4fb066ce49a61d
| 23,980 |
import requests
def get_data(github, selected_repos):
"""Generate json form custom-cards org."""
org = "custom-cards"
data = {}
repos = []
if selected_repos:
repos.append(selected_repos)
else:
for repo in list(github.get_user(org).get_repos()):
repos.append(repo.name)
for repo in repos:
try:
repo = github.get_repo(org + "/" + repo)
if repo.name not in BLACKLIST and not repo.archived:
print("Generating json for:", "{}/{}".format(org, repo.name))
try:
release = list(repo.get_releases())[0]
except Exception: # pylint: disable=W0703
release = None
name = repo.name
version = None
try:
if release and release.tag_name is not None:
version = release.tag_name
else:
content = repo.get_file_contents("VERSION")
content = content.decoded_content.decode()
version = content.split()[0]
except Exception: # pylint: disable=W0703
version = None
if release:
remote_location = REUSE_TAG.format(org, name, version, name)
else:
remote_location = REUSE.format(org, name, name)
remote_location = remote_location + ".js"
testfile = requests.get(remote_location)
if testfile.status_code != 200:
remote_location = remote_location.split(name + ".js")[0]
remote_location = remote_location + "dist/" + name + ".js"
testfile = requests.get(remote_location)
if testfile.status_code != 200:
remote_location = remote_location.split("dist/" + name + ".js")[0]
remote_location = remote_location + "src/" + name + ".js"
testfile = requests.get(remote_location)
if testfile.status_code != 200:
continue
visit_repo = VISIT.format(org, name)
try:
changelog = list(repo.get_releases())[0].html_url
if "untagged" in list(repo.get_releases())[0].name:
changelog = None
except Exception: # pylint: disable=W0703
changelog = None
if changelog is None:
changelog = VISIT.format(org, name)
data[name] = {}
data[name]["version"] = version
data[name]["remote_location"] = remote_location
data[name]["visit_repo"] = visit_repo
data[name]["changelog"] = changelog
except Exception as error: # pylint: disable=W0703
print(error)
return data
|
bc7586f024c2017d2a97acd397c46fb29c9b80af
| 23,981 |
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
def fensemble_boosting_regressor(preds_valid, targs_valid, preds_train, targs_train, alpha=0.9):
"""
Learn combination of ensemble members from training data using Gradient Boosting Regression
Also provides prediction intervals (using quantile regression)
alpha = % prediction interval
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
https://towardsdatascience.com/how-to-generate-prediction-intervals-with-scikit-learn-and-python-ab3899f992ed
"""
ensemble_preds = []
ensemble_lower = []
ensemble_upper = []
H = preds_valid.shape[2]
# run for each day over horizon
for h in range(H):
X_train = preds_train[:,:,h].T
y_train = targs_train[:,h]
X_test = preds_valid[:,:,h].T
y_test = targs_valid[:,h]
upper_model = GradientBoostingRegressor(loss="quantile", alpha=alpha)
mid_model = GradientBoostingRegressor(loss="ls")
lower_model = GradientBoostingRegressor(loss="quantile", alpha=(1.0-alpha))
# fit models
lower_model.fit(X_train, y_train)
mid_model.fit(X_train, y_train)
upper_model.fit(X_train, y_train)
# store predictions
ensemble_preds.append(mid_model.predict(X_test))
ensemble_lower.append(lower_model.predict(X_test))
ensemble_upper.append(upper_model.predict(X_test))
return np.stack(ensemble_preds).T, np.stack(ensemble_lower).T, np.stack(ensemble_upper).T
|
5cf6c04490fc1bb774bae50d305db1df5d266f2e
| 23,982 |
def setup_land_units(srank):
"""
    Sets up our land forces for an effective social rank. We go through and populate a dictionary
    of constants that represent the IDs of unit types and their quantities. That dict is then
    returned to setup_units for setting the base size of our army.
    Args:
        srank (int): Our effective social rank for determining the size of our army.
    Returns:
        A dict of unit IDs to the quantity of those troops.
"""
INF = unit_constants.INFANTRY
PIK = unit_constants.PIKE
CAV = unit_constants.CAVALRY
ARC = unit_constants.ARCHERS
units = {}
# add more units based on srank
if srank > 6:
units[INF] = 75
units[PIK] = 30
units[CAV] = 15
units[ARC] = 30
elif srank == 6:
units[INF] = 200
units[PIK] = 70
units[CAV] = 40
units[ARC] = 70
elif srank == 5:
units[INF] = 375
units[PIK] = 125
units[CAV] = 70
units[ARC] = 125
elif srank == 4:
units[INF] = 750
units[PIK] = 250
units[CAV] = 125
units[ARC] = 250
elif srank == 3:
units[INF] = 1500
units[PIK] = 500
units[CAV] = 250
units[ARC] = 500
elif srank == 2:
units[INF] = 3000
units[PIK] = 1000
units[CAV] = 500
units[ARC] = 1000
elif srank == 1:
units[INF] = 5000
units[PIK] = 1500
units[CAV] = 1000
units[ARC] = 1500
elif srank < 1:
units[INF] = 10000
units[PIK] = 3000
units[CAV] = 2000
units[ARC] = 3000
return units
|
06f9ba5dd5164355df16cdda28f4c34a1dc4dac9
| 23,983 |
import pandas as pd
def one_hot_encode_test(test, txt_indexes_test):
    """Return the test dataframe with one-hot-encoded textual features.
Keyword arguments:
test -- the test dataframe
txt_indexes_test -- ndarray of test textual column indexes
"""
test_dummies = pd.get_dummies(test.iloc[:, txt_indexes_test])
test.drop(test.select_dtypes('object').columns, axis=1, inplace=True)
test = pd.concat([test, test_dummies], axis=1)
return test
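# Usage sketch with a toy frame; note the function drops the original text columns in place.
import numpy as np
import pandas as pd
frame = pd.DataFrame({"age": [25, 32], "colour": ["red", "blue"]})
txt_idx = np.where(frame.dtypes == object)[0]
encoded = one_hot_encode_test(frame, txt_idx)
print(encoded.columns.tolist())   # ['age', 'colour_blue', 'colour_red']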
|
41d60b9e3356e99c453ec022bff10ba90c096ad3
| 23,984 |
def get_with_label(label, tree):
"""
Get a tree's node given it's label
"""
return [n for n in tree.children if n.label == label][0]
|
fc976bcbbf8f5a03b2a17dd7b5c0061a22bedf60
| 23,985 |
import torch
def load_state_dicts(checkpoint_file, map_location=None, **kwargs):
""" Load torch items from saved state_dictionaries
"""
if map_location is None:
checkpoint = torch.load(checkpoint_file)
else:
checkpoint = torch.load(checkpoint_file, map_location=map_location)
for key, value in kwargs.items():
value.load_state_dict(checkpoint[key])
epoch = checkpoint.get('epoch')
if epoch:
return epoch
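# Hedged usage sketch: save a checkpoint in the expected layout, then restore it.
# The /tmp path and the tiny Linear layer are illustrative only.
net = torch.nn.Linear(4, 2)
torch.save({'net': net.state_dict(), 'epoch': 3}, '/tmp/ckpt.pt')
restored = torch.nn.Linear(4, 2)
print(load_state_dicts('/tmp/ckpt.pt', map_location='cpu', net=restored))   # 3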
|
dc0f6646043a03456e8ab45888866db5c517381d
| 23,986 |
import logging
from detectron2.data import transforms as T
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Now it includes resizing and flipping.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(
len(min_size)
)
logger = logging.getLogger("detectron2.data.detection_utils")
tfm_gens = []
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
tfm_gens.append(T.RandomContrast(0.5, 1.5))
tfm_gens.append(T.RandomBrightness(0.5, 1.5))
tfm_gens.append(T.RandomSaturation(0.5, 1.5))
tfm_gens.append(T.RandomFlip())
logger.info("TransformGens used in training[Updated]: " + str(tfm_gens))
return tfm_gens
|
9102077f0f4c7e1796399386b64944e6e2cb4087
| 23,987 |
def rain_specific_attenuation(R, f, el, tau):
"""Compute the specific attenuation γ_R (dB/km) given the rainfall rate.
A method to compute the specific attenuation γ_R (dB/km) from rain. The
value is obtained from the rainfall rate R (mm/h) using a power law
relationship.
.. math::
\\gamma_R = k R^\\alpha
Parameters
----------
R : number, sequence, numpy.ndarray or Quantity
Rain rate (mm/h)
f : number or Quantity
Frequency (GHz)
el : number, sequence, or numpy.ndarray
Elevation angle of the receiver points
tau : number, sequence, or numpy.ndarray
Polarization tilt angle relative to the horizontal (degrees). Tau = 45
deg for circular polarization)
Returns
-------
γ_R: numpy.ndarray
Specific attenuation from rain (dB/km)
References
----------
[1] Rain height model for prediction methods:
https://www.itu.int/rec/R-REC-P.838/en
"""
R = prepare_quantity(R, u.mm / u.hr, 'Rain rate')
f = prepare_quantity(f, u.GHz, 'Frequency')
return __model.rain_specific_attenuation(R, f, el, tau) * u.dB / u.km
|
c4a4994c7bbb08b9156e308eb6b8b9a8c7d9a99a
| 23,988 |
def parse_input(usr_input):
"""Main logic of program"""
usr_input = usr_input.strip()
if usr_input.upper() == QUIT_KEY: #exit logic
return False
else:
usr_input = usr_input.split()
if len(usr_input) == 1: #if only one argument supplied default to weekly
pay = float(usr_input[0])
pay_after_tax = round(pay - paye_funcs.calculate_PAYE(pay, "w"), 2)
print(OUTPUT.format(pay_after_tax))
elif len(usr_input) == 2: #two arguments check if expecting 3 arguments and calculate
if usr_input[1] == '-n':
print(ERROR_NO_N_NUMBER)
else:
decide_calculation(usr_input)
elif len(usr_input) == 3:
if usr_input[1] == '-n':
if usr_input[2].isnumeric():
calculate_paye_on_days(usr_input)
else:
print(ERROR_NO_N_NUMBER)
else:
print(UNKNOWN_COMMAND)
return True
|
f80ec34361f35c63a90f688a0e9cc77c67ccbbb1
| 23,990 |
import numpy as np
import matplotlib.pyplot as plt
def plot_feature_importances(clf, title='Feature Importance',
feature_names=None, max_num_features=20,
order='descending', x_tick_rotation=0, ax=None,
figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates a plot of a classifier's feature importances.
Args:
clf: Classifier instance that implements ``fit`` and ``predict_proba``
methods. The classifier must also have a ``feature_importances_``
attribute.
title (string, optional): Title of the generated plot. Defaults to
"Feature importances".
feature_names (None, :obj:`list` of string, optional): Determines the
feature names used to plot the feature importances. If None,
feature names will be numbered.
max_num_features (int): Determines the maximum number of features to
plot. Defaults to 20.
order ('ascending', 'descending', or None, optional): Determines the
order in which the feature importances are plotted. Defaults to
'descending'.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf.fit(X, y)
>>> skplt.plot_feature_importances(
... rf, feature_names=['petal length', 'petal width',
... 'sepal length', 'sepal width'])
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_feature_importances.png
:align: center
:alt: Feature Importances
"""
if not hasattr(clf, 'feature_importances_'):
raise TypeError('"feature_importances_" attribute not in classifier. '
'Cannot plot feature importances.')
importances = clf.feature_importances_
if hasattr(clf, 'estimators_')\
and isinstance(clf.estimators_, list)\
and hasattr(clf.estimators_[0], 'feature_importances_'):
std = np.std([tree.feature_importances_ for tree in clf.estimators_],
axis=0)
else:
std = None
if order == 'descending':
indices = np.argsort(importances)[::-1]
elif order == 'ascending':
indices = np.argsort(importances)
elif order is None:
indices = np.array(range(len(importances)))
else:
raise ValueError('Invalid argument {} for "order"'.format(order))
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if feature_names is None:
feature_names = indices
else:
feature_names = np.array(feature_names)[indices]
max_num_features = min(max_num_features, len(importances))
ax.set_title(title, fontsize=title_fontsize)
if std is not None:
ax.bar(range(max_num_features),
importances[indices][:max_num_features], color='r',
yerr=std[indices][:max_num_features], align='center')
else:
ax.bar(range(max_num_features),
importances[indices][:max_num_features],
color='r', align='center')
ax.set_xticks(range(max_num_features))
ax.set_xticklabels(feature_names[:max_num_features],
rotation=x_tick_rotation)
ax.set_xlim([-1, max_num_features])
ax.tick_params(labelsize=text_fontsize)
return ax
|
2237451bcde4227bc7f5c912a39e0d7bdf511ced
| 23,991 |