| content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
| --- | --- | --- |
def cdgmm(A, B, inplace=False):
"""Complex pointwise multiplication.
Complex pointwise multiplication between (batched) tensor A and tensor B.
Parameters
----------
A : tensor
A is a complex tensor of size (B, C, M, N, 2).
B : tensor
B is a complex tensor of size (M, N, 2) or real tensor of (M, N, 1).
inplace : boolean, optional
If set to True, all the operations are performed inplace.
Returns
-------
C : tensor
Output tensor of size (B, C, M, N, 2) such that:
C[b, c, m, n, :] = A[b, c, m, n, :] * B[m, n, :].
"""
if not iscomplex(A):
raise TypeError('The input must be complex, indicated by a last '
'dimension of size 2.')
if B.ndimension() != 3:
raise RuntimeError('The filter must be a 3-tensor, with a last '
'dimension of size 1 or 2 to indicate it is real '
'or complex, respectively.')
if not iscomplex(B) and not isreal(B):
raise TypeError('The filter must be complex or real, indicated by a '
'last dimension of size 2 or 1, respectively.')
if A.size()[-3:-1] != B.size()[-3:-1]:
raise RuntimeError('The filters are not compatible for multiplication!')
if A.dtype is not B.dtype:
raise TypeError('A and B must be of the same dtype.')
if A.device.type != B.device.type:
raise TypeError('A and B must be both on GPU or both on CPU.')
if A.device.type == 'cuda':
if A.device.index != B.device.index:
raise TypeError('A and B must be on the same GPU!')
if isreal(B):
if inplace:
return A.mul_(B)
else:
return A * B
else:
C = A.new(A.size())
A_r = A[..., 0].contiguous().view(-1, A.size(-2)*A.size(-3))
A_i = A[..., 1].contiguous().view(-1, A.size(-2)*A.size(-3))
B_r = B[...,0].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_i)
B_i = B[..., 1].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_r)
C[..., 0].view(-1, C.size(-2)*C.size(-3))[:] = A_r * B_r - A_i * B_i
C[..., 1].view(-1, C.size(-2)*C.size(-3))[:] = A_r * B_i + A_i * B_r
return C if not inplace else A.copy_(C)
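For reference, a minimal sketch of the complex pointwise product that cdgmm computes, written with plain PyTorch ops; the shapes and random inputs below are illustrative assumptions, not values from the source.

import torch

A = torch.randn(2, 3, 8, 8, 2)   # batched "complex" input (B, C, M, N, 2)
B = torch.randn(8, 8, 2)         # complex filter (M, N, 2)
A_r, A_i = A[..., 0], A[..., 1]
B_r, B_i = B[..., 0], B[..., 1]
# (a + ib)(c + id) = (ac - bd) + i(ad + bc), applied pointwise with broadcasting
C = torch.stack((A_r * B_r - A_i * B_i, A_r * B_i + A_i * B_r), dim=-1)
# C has shape (2, 3, 8, 8, 2) and matches cdgmm(A, B) for a complex filter B.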
|
c3a65ec03339edd0defe723fc860ff9f54495eda
| 29,125 |
def InvocationStartEncKeyVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartEncKeyVector(builder, numElems)
|
c00609da890986ff4cf5c30f246459342b9d60bd
| 29,126 |
from functools import reduce
def dynamic_partial_sum_product(
sum_op, prod_op, factors, eliminate=frozenset(), plate_to_step=dict()
):
"""
Generalization of the tensor variable elimination algorithm of
    :func:`funsor.sum_product.partial_sum_product` to handle higher-order Markov
    dimensions in addition to plate dimensions. Markov dimensions in transition
    factors are eliminated efficiently using the parallel-scan algorithm in
    :func:`funsor.sum_product.sarkka_bilmes_product`. The resulting factors are then
    combined with the initial factors and the final states are eliminated. Therefore,
    when a Markov dimension is eliminated, ``factors`` has to contain both the
    initial factors and the transition factors.
:param ~funsor.ops.AssociativeOp sum_op: A semiring sum operation.
:param ~funsor.ops.AssociativeOp prod_op: A semiring product operation.
:param factors: A collection of funsors.
:type factors: tuple or list
:param frozenset eliminate: A set of free variables to eliminate,
        including both sum variables and product variables.
:param dict plate_to_step: A dict mapping markov dimensions to
``step`` collections that contain ordered sequences of Markov variable names
(e.g., ``{"time": frozenset({("x_0", "x_prev", "x_curr")})}``).
Plates are passed with an empty ``step``.
:return: a list of partially contracted Funsors.
:rtype: list
"""
assert callable(sum_op)
assert callable(prod_op)
assert isinstance(factors, (tuple, list))
assert all(isinstance(f, Funsor) for f in factors)
assert isinstance(eliminate, frozenset)
assert isinstance(plate_to_step, dict)
# process plate_to_step
plate_to_step = plate_to_step.copy()
prev_to_init = {}
markov_to_sarkka = {}
markov_sum_vars = set()
for key, step in plate_to_step.items():
for chain in step:
# map old markov step names to sarkka_bilmes format step names
# Case 1
# x_slice(0, 5, None) -> _PREV__PREV_x_slice(2, 7, None)
# x_slice(1, 6, None) -> _PREV_x_slice(2, 7, None)
# x_slice(2, 7, None) -> x_slice(2, 7, None)
# Case 2
# x_prev - > _PREV_x_curr
# x_curr -> x_curr
history = len(chain) // 2
base_name = chain[-1]
for t, name in enumerate(reversed(chain[history:-1])):
markov_to_sarkka[name] = _shift_name(base_name, t + 1)
markov_sum_vars.add(base_name)
markov_sum_vars.update(markov_to_sarkka)
# map prev to init; works for any history > 0
init, prev = chain[: len(chain) // 2], chain[len(chain) // 2 : -1]
prev = tuple(markov_to_sarkka[name] for name in prev)
prev_to_init.update(zip(prev, init))
markov_sum_vars = frozenset(markov_sum_vars)
plates = frozenset(plate_to_step.keys())
sum_vars = eliminate - plates
prod_vars = eliminate.intersection(plates)
markov_prod_vars = frozenset(
k for k, v in plate_to_step.items() if v and k in eliminate
)
markov_sum_to_prod = defaultdict(set)
for markov_prod in markov_prod_vars:
for chain in plate_to_step[markov_prod]:
for name in chain[len(chain) // 2 :]:
markov_sum_to_prod[name].add(markov_prod)
var_to_ordinal = {}
ordinal_to_factors = defaultdict(list)
for f in factors:
ordinal = plates.intersection(f.inputs)
ordinal_to_factors[ordinal].append(f)
for var in sum_vars.intersection(f.inputs):
var_to_ordinal[var] = var_to_ordinal.get(var, ordinal) & ordinal
ordinal_to_vars = defaultdict(set)
for var, ordinal in var_to_ordinal.items():
ordinal_to_vars[ordinal].add(var)
results = []
while ordinal_to_factors:
leaf = max(ordinal_to_factors, key=len)
leaf_factors = ordinal_to_factors.pop(leaf)
leaf_reduce_vars = ordinal_to_vars[leaf]
for (group_factors, group_vars) in _partition(
leaf_factors, leaf_reduce_vars | markov_prod_vars
):
# eliminate non markov vars
nonmarkov_vars = group_vars - markov_sum_vars - markov_prod_vars
f = reduce(prod_op, group_factors).reduce(sum_op, nonmarkov_vars)
# eliminate markov vars
markov_vars = group_vars.intersection(markov_sum_vars)
if markov_vars:
markov_prod_var = [markov_sum_to_prod[var] for var in markov_vars]
assert all(p == markov_prod_var[0] for p in markov_prod_var)
if len(markov_prod_var[0]) != 1:
raise ValueError("intractable!")
time = next(iter(markov_prod_var[0]))
for v in sum_vars.intersection(f.inputs):
if time in var_to_ordinal[v] and var_to_ordinal[v] < leaf:
raise ValueError("intractable!")
time_var = Variable(time, f.inputs[time])
# markov_to_sarkka renames variables in MarkovProduct format
# to sarkka_bilmes_product format
base_names = markov_vars.intersection(
_shift_name(name, -_get_shift(name))
for name in markov_to_sarkka.values()
)
f = f(**markov_to_sarkka)
global_vars = frozenset(
set(f.inputs)
- {time_var.name}
- set(markov_to_sarkka.values())
- base_names
)
with funsor.terms.eager:
f = funsor.optimizer.apply_optimizer(f)
f = sarkka_bilmes_product(sum_op, prod_op, f, time_var, global_vars)
f = f.reduce(sum_op, base_names)
f = f(**prev_to_init)
remaining_sum_vars = sum_vars.intersection(f.inputs)
if not remaining_sum_vars:
results.append(f.reduce(prod_op, leaf & prod_vars - markov_prod_vars))
else:
new_plates = frozenset().union(
*(var_to_ordinal[v] for v in remaining_sum_vars)
)
if new_plates == leaf:
raise ValueError("intractable!")
f = f.reduce(prod_op, leaf - new_plates - markov_prod_vars)
ordinal_to_factors[new_plates].append(f)
return results
|
a08298f1440c212310cc3298629e27743325c9ca
| 29,127 |
def range_to_number(interval_str):
"""Converts "X-Y" -> "X"."""
if not '-' in interval_str:
return int(interval_str)
# If first character is -, X is a negative number
if interval_str.startswith('-'):
number = '-' + interval_str.split('-')[1]
else:
number = interval_str.split('-')[0]
if number[-1] == 'M':
return int(round(float(number[:-1]) * 1000000))
elif number[-1] == 'B':
return int(round(float(number[:-1]) * 1000000000))
elif '.' in number:
return float(number)
else:
return int(number)
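A few illustrative calls (the interval strings are made-up examples):

range_to_number('10-20')    # -> 10
range_to_number('-5-10')    # -> -5   (leading '-' marks a negative lower bound)
range_to_number('1.5M-2M')  # -> 1500000
range_to_number('0.5-1')    # -> 0.5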
|
562031503241cc37b1b6df5dd657f2f2d90b79a3
| 29,128 |
import warnings
def load_wav_file_with_wavio(
file_path, sample_rate, mono=True, resample_type="kaiser_best"
):
"""Load a 24-bit wav audio file as a floating point time series. Significantly faster than
load_sound_file."""
wavio_obj = wavio.read(str(file_path))
samples = wavio_obj.data
actual_sample_rate = wavio_obj.rate
if samples.dtype != np.float32:
if wavio_obj.sampwidth == 3:
samples = np.true_divide(
samples, 8388608, dtype=np.float32
) # ends up roughly between -1 and 1
elif wavio_obj.sampwidth == 2:
samples = np.true_divide(
samples, 32768, dtype=np.float32
) # ends up roughly between -1 and 1
else:
raise Exception("Unknown sampwidth")
if mono and len(samples.shape) > 1:
if samples.shape[1] == 1:
samples = samples[:, 0]
else:
samples = np.mean(samples, axis=1)
if sample_rate is not None and actual_sample_rate != sample_rate:
if resample_type == "auto":
resample_type = (
"kaiser_fast" if actual_sample_rate < sample_rate else "kaiser_best"
)
samples = librosa.resample(
samples, actual_sample_rate, sample_rate, res_type=resample_type
)
warnings.warn(
"{} had to be resampled from {} hz to {} hz. This hurt execution time.".format(
str(file_path), actual_sample_rate, sample_rate
)
)
actual_sample_rate = actual_sample_rate if sample_rate is None else sample_rate
return samples, actual_sample_rate
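A hypothetical usage sketch; the file path and target rate below are placeholders, and the call assumes the wavio, numpy and librosa imports this module relies on.

samples, sr = load_wav_file_with_wavio("example_24bit.wav", sample_rate=16000)
print(samples.dtype, samples.shape, sr)   # float32 samples roughly in [-1, 1], sr == 16000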
|
a1b7896e8ac4b9b5833c3ca25776295deb56839e
| 29,129 |
def as_cidr(cr: CidrRepr) -> Cidr:
"""
Returns a strict network address expressed as in CIDR form: either a string, expressing the network address as
``"<network number><zeros>/<mask bits>"``, or as a ``Cidr`` object, which is returned unaltered.
"""
if isinstance(cr, _BaseNetwork):
return cr
return ip_network(cr)
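A short usage sketch, assuming the module's ipaddress-based imports (ip_network and the _BaseNetwork base class) are available:

from ipaddress import ip_network

net = as_cidr('192.168.0.0/24')            # string parsed into an IPv4Network
same = as_cidr(ip_network('10.0.0.0/8'))   # already a network object: returned unaltered
print(net, same)                           # 192.168.0.0/24 10.0.0.0/8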
|
7d6d40c7269619f6189ea1cee940ed4d33eadb1f
| 29,131 |
from vivofoundation import get_triples
def get_authorship(authorship_uri):
"""
Given a URI, return an object that contains the authorship it represents
"""
authorship = {'authorship_uri':authorship_uri}
triples = get_triples(authorship_uri)
try:
count = len(triples["results"]["bindings"])
except:
count = 0
i = 0
while i < count:
b = triples["results"]["bindings"][i]
p = b['p']['value']
o = b['o']['value']
if p == "http://vivoweb.org/ontology/core#authorRank":
authorship['author_rank'] = o
if p == "http://vivoweb.org/ontology/core#linkedAuthor":
authorship['author_uri'] = o
if p == "http://vivoweb.org/ontology/core#linkedInformationResource":
authorship['publication_uri'] = o
if p == "http://vivoweb.org/ontology/core#isCorrespondingAuthor":
authorship['corresponding_author'] = o
i = i + 1
return authorship
|
83a1d6a763e16d43c7c83f65f7f3ad11afd5506e
| 29,132 |
def Init():
""" Инициализации важных переменных """
# Получаем список листов, их Id и название
spreadsheet = service.spreadsheets().get(spreadsheetId = spreadsheet_id).execute()
sheetList = spreadsheet.get('sheets')
sheetUsers = sheetList[0]['properties']['sheetId']
sheetSW = sheetList[1]['properties']['sheetId']
#последняя строчка откуда можно присоединять новенького
last_raw_in_SW = sorted( sheetList[1]['merges'], key = lambda x: x['endRowIndex'], reverse = True)[0]['endRowIndex'] # Узнаём последнюю заполненную строчку в таблице с очками
return sheetUsers, sheetSW, last_raw_in_SW
|
917f2ec6f5d39260ed89c546695e7cd2839bc7b6
| 29,133 |
def Nlam_to_Flam(wave, zeropoint, zp_min=5.0, zp_max=30.0):
"""
    The factor that, when multiplied into N_lam, converts it to F_lam, i.e. S_lam where S_lam = F_lam / N_lam.
Parameters
----------
wave (`numpy.ndarray`_):
Wavelength vector for zeropoint
zeropoint (`numpy.ndarray`_):
zeropoint
zp_min (float, optional):
Minimum allowed value of the ZP. For smaller values the S_lam factor is set to zero
zp_max (float, optional):
Maximum allowed value of the ZP. For larger values the S_lam factor is set to zero
    Returns
    -------
    factor (`numpy.ndarray`_):
        The S_lam conversion factor; set to zero where the zeropoint lies outside (zp_min, zp_max)
        or the wavelength is not greater than 1.0.
    """
gpm = (wave > 1.0) & (zeropoint > zp_min) & (zeropoint < zp_max)
factor = np.zeros_like(wave)
factor[gpm] = np.power(10.0, -0.4*(zeropoint[gpm] - ZP_UNIT_CONST))/np.square(wave[gpm])
return factor
|
843560dde9e4ec6d2781e4179ca047d00c5e3abc
| 29,134 |
def base64_values_validate(name, description, color_set):
"""Ensures the string wasn't maliciously fabricated to feed corrupted data into the app, even if the b64 code itself
successfully decoded into a valid string."""
if custom_palette_name_validate(name) or custom_palette_description_validate(description) or \
custom_palette_color_set_validate(color_set):
return {'error': True}
|
878a5cdf20bcc380a8e81afc47e40592eb4db030
| 29,135 |
def isIndepFromTarget(df, attr, x):
"""
    Determine whether an attribute is independent of the target.
    :df: the chosen dataframe
    :attr: the attribute to study
    :x: significance threshold
    :return: True if the attribute is independent of the target
    """
obs=[[], []]
ref=[]
for t in df.itertuples():
dic=t._asdict()
if dic[attr] not in ref:
ref.append(dic[attr])
index=ref.index(dic[attr])
if(len(obs[0])<index+1):
obs[0].append(0)
obs[1].append(0)
obs[dic['target']][index]+=1
a,b,c,d=chi2_contingency(obs)
if b<x:
return False
return True
|
7bad7b227f3c7af5413ad9778870c44c7844bf6a
| 29,136 |
def read_docs_md(filename, root=None):
"""
Retrieves an apidoc markdown file to be implemented in swagger_auto_schema
:param(str) root: root base dir, settings.BASE_DIR as default
:param(str) filename: the filename to be retrieved without the .md file type
:return: the content of the md file, None if not found
"""
base = root or settings.BASE_DIR
try:
f = open(f"{base}/apidocs/{filename}.md", "r")
return f.read()
except FileNotFoundError:
return None
|
e888e1df91b4154fb9d67f6268da76ab2028f780
| 29,137 |
def unrotate(points, posor):
"""Rotate the matrix of column vectors points according to posor, i.e., from
absolute coordinates to camera coordinates"""
rot_matrix = calc_rot_matrix(posor)
return rot_matrix.I * points
|
58066c958da982d035792997eaff00bfc573d2d1
| 29,138 |
def exp_tail(d, x):
"""Tail of the exponential series starting at d. Needed in the set sampler.
Parameters
----------
d: int
x: float
Returns
-------
float
"""
result = exp(x)
    # Subtract the first d terms.
for i in range(d):
result -= (pow(x, i) / factorial(i))
return result
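A quick numeric check (this assumes math.exp and math.factorial, which the helper itself relies on):

from math import exp, isclose

# Tail of e**x starting at the d-th term: exp_tail(2, 1.0) = e - 1 - 1, about 0.71828
assert isclose(exp_tail(2, 1.0), exp(1.0) - 1.0 - 1.0)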
|
48ac3da79e293451d42c5d55196863e27ed3b3e1
| 29,139 |
def check_projects_scores(request, hackathon_id):
""" When a judge submits the score, check if all projects in the Hackathon
were scored by all the judges in all the categories by comparing the
number of objects in HackProjectScore for each projects to the required
number of objects.
If all projects weren't scored, render final_score.html without the
score table.
If all the projects were scored, calculate the total score for each team,
sort the teams by scores
and render final_score.html with the score table.
"""
hackathon = get_object_or_404(Hackathon, pk=hackathon_id)
HackAwardFormSet = modelformset_factory(
HackAward, fields=('id', 'hack_award_category',
'winning_project'),
form=HackAwardForm, extra=0)
if request.method == 'POST':
hack_awards_formset = HackAwardFormSet(
request.POST,
form_kwargs={'hackathon_id': hackathon_id},
queryset=HackAward.objects.filter(hackathon=hackathon))
if hack_awards_formset.is_valid():
try:
with transaction.atomic():
hack_awards_formset.save()
except IntegrityError as e:
if 'UNIQUE' in str(e):
messages.error(request,
("Each award category can only be added "
"once to a hackathon."))
else:
logger.exception(e)
messages.error(request,
("An unexpected error occurred. Please "
"try again."))
else:
messages.error(request,
"An unexpected error occurred. Please try again.")
return redirect(reverse('hackathon:final_score',
kwargs={'hackathon_id': hackathon_id}))
else:
judges = [judge.slack_display_name for judge in hackathon.judges.all()]
teams = [team.display_name for team in hackathon.teams.all()
if team.project]
scores = query_scores(hackathon_id)
scores_table = create_judges_scores_table(scores, judges, teams)
hack_awards_formset = HackAwardFormSet(
form_kwargs={'hackathon_id': hackathon_id},
queryset=HackAward.objects.filter(hackathon=hackathon))
return render(request, 'hackathon/final_score.html', {
'hackathon': hackathon.display_name,
'hack_awards_formset': hack_awards_formset,
'scores_table': scores_table,
'teams_without_projects': '\n'+'\n'.join([
team.display_name
for team in hackathon.teams.all()
if not team.project]),
})
|
edfb52db396a984e10a437a1b6561a3e493d9e0e
| 29,140 |
def get_path(obj, path, default=None):
"""Get the value at any depth of a nested object based on the path
described by `path`. If path doesn't exist, `default` is returned.
Args:
obj (list|dict): Object to process.
path (str|list): List or ``.`` delimited string of path describing
path.
Keyword Arguments:
default (mixed): Default value to return if path doesn't exist.
Defaults to ``None``.
Returns:
mixed: Value of `obj` at path.
Example:
>>> get_path({}, 'a.b.c') is None
True
>>> get_path({'a': {'b': {'c': [1, 2, 3, 4]}}}, 'a.b.c.1')
2
.. versionadded:: 2.0.0
.. versionchanged:: 2.2.0
Support escaping "." delimiter in single string path key.
"""
for key in path_keys(path):
obj = get_item(obj, key, default=default)
if obj is None:
break
return obj
|
c72cd428979a3f39214c57346aa345087a0248c7
| 29,144 |
def get_label_set_args():
"""
Add arguments specific to the "Label Set" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence_set/')
cmd.add('--out_dir', type=str, default='output/label_set/')
cmd.add('--val_frac', type=float, default=0.1)
cmd.add('--edit_frac', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
return cmd
|
d08e59452ba7afdd8a478f589366e9ba219abb18
| 29,145 |
def literal_label(lit):
""" Invent a nice label name for the given literal """
return '{}_{}'.format(lit.function.name, lit.name)
|
14a22d989ee9f07e00e66d1340b946d385d677fd
| 29,146 |
from typing import Optional
def key(element: DOMElement) -> Optional[str]:
"""
Retrieve the key of a particular :class:`.DOMElement` in its parent element, if it can be
    referred to by a key (i.e. if its parent element is a :class:`collections.abc.Mapping`).
:param element: A DOM element
:return: The key of that DOM element in its parent, or None if it has no key
"""
return dom(element).element_key
|
f7ac059faa2023f88bad386a3d28e44a44c4259d
| 29,147 |
def absolute_reverse(view_name, query_kwargs=None, args=None, kwargs=None):
"""Like django's `reverse`, except returns an absolute URL. Also add query parameters."""
relative_url = reverse(view_name, kwargs=kwargs)
url = website_util.api_v2_url(relative_url, params=query_kwargs)
return url
|
11fc1bdc7be40fbbd462570f70f9fe77a5b4777f
| 29,148 |
def convert(chinese):
"""converts Chinese numbers to int
in: string
    out: int
"""
numbers = {'零':0, '一':1, '二':2, '三':3, '四':4, '五':5, '六':6, '七':7, '八':8, '九':9, '壹':1, '贰':2, '叁':3, '肆':4, '伍':5, '陆':6, '柒':7, '捌':8, '玖':9, '两':2, '廿':20, '卅':30, '卌':40, '虚':50, '圆':60, '近':70, '枯':80, '无':90}
units = {'个':1, '十':10, '百':100, '千':1000, '万':10000, '亿':100000000, '拾':10, '佰':100, '仟':1000}
number, pureNumber = 0, True
for i in range(len(chinese)):
if chinese[i] in units or chinese[i] in ['廿', '卅', '卌', '虚', '圆', '近', '枯', '无']:
pureNumber = False
break
if chinese[i] in numbers:
number = number * 10 + numbers[chinese[i]]
if pureNumber:
return number
number = 0
for i in range(len(chinese)):
if chinese[i] in numbers or chinese[i] == '十' and (i == 0 or chinese[i - 1] not in numbers or chinese[i - 1] == '零'):
base, currentUnit = 10 if chinese[i] == '十' and (i == 0 or chinese[i] == '十' and chinese[i - 1] not in numbers or chinese[i - 1] == '零') else numbers[chinese[i]], '个'
for j in range(i + 1, len(chinese)):
if chinese[j] in units:
if units[chinese[j]] >= units[currentUnit]:
base, currentUnit = base * units[chinese[j]], chinese[j]
number = number + base
return number
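A few conversions traced against the logic above (inputs are illustrative):

print(convert('三百二十一'))  # 321
print(convert('十二'))        # 12
print(convert('五六七'))      # 567 (a pure digit sequence, no unit characters)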
|
c08b9e01f0981afd09d2d9537ec1e98f2af46c06
| 29,149 |
def method_not_raises(UnexpectedException):
"""A decorator that ensures that the underlying function does not raise the UnexpectedException"""
@Decorators.decorator
def method_not_raises(target, *args, **kwargs):
return not_raises(UnexpectedException, target, *args, **kwargs)
return method_not_raises
|
f5267acedcd7bebec7d0cae998635c03c95e2eb8
| 29,150 |
def create_random_data(n_randoms, stomp_map):
"""Function for creating randomly positioned unknown objects on the considerd
geomometry. These is used for normalizing the output PDF and properly
estimating the "zero point" of the correlation amplitude. The code returns
a spatially searchable quad tree of the random points.
----------------------------------------------------------------------------
Args:
n_randoms: int number of random points to generate
stomp_map: STOMP.Map object specifying the survey geomometry
Returns:
STOMP::TreeMap object
"""
print("Creating %i randoms..." % n_randoms)
random_vect = stomp.AngularVector()
stomp_map.GenerateRandomPoints(random_vect, n_randoms)
random_tree = stomp.TreeMap(
int(np.max((128, stomp_map.RegionResolution()))), 200)
print("\tLoading randoms into tree map...")
for rand_ang in random_vect:
random_tree.AddPoint(rand_ang, 1.0)
return random_tree
|
c3c73e358d767e46064400d9b48e99a79b7bcfea
| 29,151 |
def mobilenetv1():
"""Handler da página inicial do modelo de Mobilenet V1
:return:
"""
return render_template("mobilenetv1.html")
|
edf8cb04c715c4ce0d3c70883813fb11d0640a19
| 29,153 |
def move(board):
"""Queries the user to move. Returns false if the user puts in an invalid input or move, returns true if the move was successful"""
start_input = input("MOVE WHICH PIECE? ")
if not start_input.isdigit():
return False
start = int(start_input)
if start not in board or board[start] != "!":
return False
end_input = input("TO WHERE? ")
if not end_input.isdigit():
return False
end = int(end_input)
if end not in board or board[end] != "O":
return False
difference = abs(start - end)
center = (end + start) / 2
if (
(difference == 2 or difference == 18)
and board[end] == "O"
and board[center] == "!"
):
board[start] = "O"
board[center] = "O"
board[end] = "!"
return True
else:
return False
|
3377b4f349c9519eff4ede707d10e08038e9d7fc
| 29,154 |
import re
def _read_reaction_kinetic_law_from_sbml(reaction, mass_reaction, f_replace, **kwargs):
"""Read the SBML reaction kinetic law and return it.
Warnings
--------
This method is intended for internal use only.
"""
mass_rid = mass_reaction.id
sbml_species = (
list(reaction.getListOfReactants())
+ list(reaction.getListOfProducts())
+ list(reaction.getListOfModifiers())
)
sbml_species = [sref.getSpecies() for sref in sbml_species]
local_parameters = {}
if reaction.isSetKineticLaw():
sbml_rid = reaction.getIdAttribute()
        # Get the kinetic law and the rate equation as a string.
        kinetic_law = reaction.getKineticLaw()
        rate_eq = _check_required(kinetic_law, kinetic_law.getFormula(), "formula")
# Perform substitution for power law operations to sympify rate
for match in _KLAW_POW_RE.finditer(rate_eq):
old = match.group(0)
new = "(({0})**{1})".format(match.group("arg"), match.group("exp"))
rate_eq = rate_eq.replace(old, new)
# Try to sympify the reaction rate
try:
rate_eq = sympify(rate_eq)
except SympifyError as e:
raise MassSBMLError(e)
# If ID replacements were performed earlier then apply the ID
# replacements for metabolite and parameter arguments in rate law also.
id_subs = {}
for arg in list(rate_eq.atoms(Symbol)):
arg = str(arg)
new_arg = arg
# Check if reaction is in the name of the parameter
if re.search(sbml_rid, arg) and sbml_rid != mass_rid:
new_arg = _get_corrected_id(
new_arg,
(sbml_rid, mass_rid, arg),
"Parameter",
kwargs.get("remove_char"),
)
elif arg in sbml_species:
new_arg = _get_corrected_id(
new_arg, (f_replace, F_SPECIE), None, kwargs.get("remove_char")
)
else:
if kwargs.get("remove_char"):
new_arg = _remove_char_from_id(new_arg)
id_subs[arg] = new_arg
# Make rate equation
rate_eq = rate_eq.subs(id_subs)
for local_parameter in kinetic_law.getListOfLocalParameters():
pid = _check_required(
local_parameter, local_parameter.getIdAttribute(), "id"
)
value = local_parameter.getValue()
if re.search(sbml_rid, pid) and sbml_rid != mass_rid:
pid = _get_corrected_id(
pid,
(sbml_rid, mass_rid, pid),
"Parameter",
kwargs.get("remove_char"),
)
elif kwargs.get("remove_char"):
pid = _remove_char_from_id(pid)
local_parameters[pid] = value
else:
LOGGER.warning(
"No kinetic law found for SBML reaction '%s'. Therefore, assigning"
" the MassReaction '%s' a rate law based on Mass Action Kinetics.",
reaction,
mass_rid,
)
rate_eq = mass_reaction.get_mass_action_rate(1)
return rate_eq, local_parameters
|
ea800e5b7ccde7dbc87c11066176f963e0367256
| 29,155 |
def _total_probe_count_without_interp(params, probe_counts):
"""Calculate a total probe count without interpolation.
This assumes that params are keys in the datasets of probe_counts.
The result of ic._make_total_probe_count_across_datasets_fn should give
the same count as this function (if params are keys in the datasets
of probe_counts). But this uses probe_counts directly and can be
used as a sanity check -- i.e., it does not do any interpolation.
Args:
params: parameter values to use when determining probe counts;
params[i] is the (i % N)'th parameter of the (i/N)'th dataset,
where N is the number of datasets
probe_counts: dict giving number of probes for each dataset and
choice of parameters
Returns:
total number of probes across all datasets, according to the
given values of params
"""
num_datasets = len(probe_counts)
# The total number of parameters must be a multiple of the number
# of datasets
assert len(params) % num_datasets == 0
num_params = int(len(params) / num_datasets)
s = 0
for i, dataset in enumerate(sorted(probe_counts.keys())):
p = tuple(params[num_params * i + j] for j in range(num_params))
s += probe_counts[dataset][p]
return s
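A tiny sanity check with two datasets and one parameter each (the dataset names and counts are made up):

probe_counts = {'dataset_a': {(5,): 100}, 'dataset_b': {(7,): 250}}
params = [5, 7]   # one parameter per dataset, ordered by sorted dataset name
assert _total_probe_count_without_interp(params, probe_counts) == 350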
|
0973e667dbf1fc3bdf476791cbf709549230f94b
| 29,156 |
from typing import Any
import math
def make_divisible(x: Any, divisor: int):
"""Returns x evenly divisible by divisor."""
return math.ceil(x / divisor) * divisor
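For example:

print(make_divisible(53, 8))   # 56: the smallest multiple of 8 that is >= 53
print(make_divisible(64, 8))   # 64: already divisible, unchanged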
|
bfbcfb334777a6c7214f16aa0fadd56906e2b7bc
| 29,158 |
def one_vehicle_xml():
"""Emulates a XML response for 1 vehicle trajectory"""
STREAM = b'<INST nbVeh="1" val="2.00"><CREATIONS><CREATION entree="Ext_In" id="1" sortie="Ext_Out" type="VL"/></CREATIONS><SORTIES/><TRAJS><TRAJ abs="25.00" acc="0.00" dst="25.00" id="0" ord="0.00" tron="Zone_001" type="VL" vit="25.00" voie="1" z="0.00"/></TRAJS><STREAMS/><LINKS/><SGTS/><FEUX/><ENTREES><ENTREE id="Ext_In" nb_veh_en_attente="1"/></ENTREES><REGULATIONS/></INST>'
return STREAM
|
792cfb5895fd033c40a4cdbf6e79083c865d0093
| 29,160 |
def select_data(all_tetrode_data, index):
"""
Select tetrode data by trial indices.
:param all_tetrode_data: (list of 4d numpy arrays) each of format [trial, 1, neuron + tetrode, time]
:param index: (1d numpy array) trial indices
:return: (list of 4d numpy arrays) selected subset of tetrode data
"""
current_data = []
for x in all_tetrode_data:
current_data.append(x[index, :, :, :])
return current_data
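A minimal sketch with synthetic arrays (the shapes here are assumptions for illustration):

import numpy as np

all_tetrode_data = [np.zeros((10, 1, 6, 100)), np.zeros((10, 1, 8, 100))]
subset = select_data(all_tetrode_data, np.array([0, 2, 4]))
print([x.shape for x in subset])   # [(3, 1, 6, 100), (3, 1, 8, 100)]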
|
5a883771ef499e0b82e0d3ac5b86550180760e13
| 29,161 |
from datetime import datetime
def normalize(ds_train, ds_cv, ds_test):
"""
Normalization of datasets
Parameters
----------
ds_train: Dataset
Training set
ds_cv: Dataset
Cross-validation set
ds_test: Dataset
Test set
Returns
-------
norm_train: Dataset
Normalized training set
norm_cv: Dataset
Normalized cross-validation set
norm_test: Dataset
Normalized test set
"""
t = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("[INFO] {} - Normalizing training set ... ".format(t))
normalizer = dl.Normalization()
ds_train.data = normalizer.fit_and_transform(ds_train.data,
method='z_score_std',
per_col_scaler=True)
t = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("[INFO] {} - Normalizing crossval set ... ".format(t))
ds_cv.data = normalizer.transform(ds_cv.data)
t = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("[INFO] {} - Normalizing test set ... ".format(t))
ds_test.data = normalizer.transform(ds_test.data)
return ds_train, ds_cv, ds_test
|
ad6731096e1081f3ff764ec055d4d3035a40ecbe
| 29,162 |
def generate_sequential(num_users=100,
num_items=1000,
num_interactions=10000,
concentration_parameter=0.1,
order=3,
random_state=None):
"""
Generate a dataset of user-item interactions where sequential
information matters.
The interactions are generated by a n-th order Markov chain with
a uniform stationary distribution, where transition probabilities
are given by doubly-stochastic transition matrix. For n-th order chains,
transition probabilities are a convex combination of the transition
probabilities of the last n states in the chain.
The transition matrix is sampled from a Dirichlet distribution described
by a constant concentration parameter. Concentration parameters closer
to zero generate more predictable sequences.
Parameters
----------
num_users: int, optional
number of users in the dataset
num_items: int, optional
number of items (Markov states) in the dataset
num_interactions: int, optional
number of interactions to generate
concentration_parameter: float, optional
Controls how predictable the sequence is. Values
closer to zero give more predictable sequences.
order: int, optional
order of the Markov chain
random_state: numpy.random.RandomState, optional
random state used to generate the data
Returns
-------
Interactions: :class:`spotlight.interactions.Interactions`
instance of the interactions class
"""
if random_state is None:
random_state = np.random.RandomState()
transition_matrix = _build_transition_matrix(
num_items - 1,
concentration_parameter,
random_state)
user_ids = np.sort(random_state.randint(0,
num_users,
num_interactions,
dtype=np.int32))
item_ids = _generate_sequences(num_interactions,
transition_matrix,
order,
random_state) + 1
timestamps = np.arange(len(user_ids), dtype=np.int32)
ratings = np.ones(len(user_ids), dtype=np.float32)
return Interactions(user_ids,
item_ids,
ratings=ratings,
timestamps=timestamps,
num_users=num_users,
num_items=num_items)
|
1a9a23fda9c17d5b7085d860986aab78368a4408
| 29,163 |
def expand(fluid, pfinal, eta):
"""Adiabatically expand a fluid to pressure pfinal, using
a turbine with isentropic efficiency eta."""
h0 = fluid.enthalpy_mass()
s0 = fluid.entropy_mass()
fluid.set(S = s0, P = pfinal)
h1s = fluid.enthalpy_mass()
isentropic_work = h0 - h1s
actual_work = isentropic_work * eta
h1 = h0 - actual_work
fluid.set(H = h1, P = pfinal)
return actual_work
|
acf8cd63684ccf3c41c38cc631d66b4bc143c5c6
| 29,164 |
def ADOSC(
frame,
fast=3,
slow=10,
high_col="high",
low_col="low",
close_col="close",
vol_col="Volume",
):
"""Chaikin A/D oscillator"""
return _frame_to_series(
frame, [high_col, low_col, close_col, vol_col], talib.ADOSC, fast, slow
)
|
61b4959407d68fce2023a135253e02aa7e3428fc
| 29,165 |
def op_scr(
gep: pd.DataFrame,
gross_tp: pd.DataFrame,
ul_exp: float,
bscr: float
):
"""
SCR Op Risk module
Inputs:
- Gross EP last 12m and 12m prior
- Gross TP: BEL should be positive
- BSCR
"""
op_premiums = 0.04 * (gep.at['life_all', 'gep_last12m'] - gep.at['life_ul', 'gep_last12m']) \
+ 0.04 * max(0., (gep.at['life_all', 'gep_last12m'] - gep.at['life_ul', 'gep_last12m'])
- 1.2 * (gep.at['life_all', 'gep_prior12m'] - gep.at['life_ul', 'gep_prior12m'])) \
+ 0.03 * gep.at['nl', 'gep_last12m'] \
+ 0.03 * max(0., gep.at['nl', 'gep_last12m'] - 1.2 * gep.at['nl', 'gep_prior12m'])
op_provisions = 0.0045 * max(0., gross_tp.at['life_all'] - gross_tp.at['life_ul']) \
+ 0.03 * max(0., gross_tp.at['nl'])
op = max(op_premiums, op_provisions)
scr_op = min(op, 0.3 * bscr) + 0.25 * ul_exp
return scr_op, op
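A hedged input sketch showing the expected frame and series layout; the index labels match the lookups above, but all figures are illustrative, not from the source.

import pandas as pd

gep = pd.DataFrame(
    {'gep_last12m': [120.0, 20.0, 80.0], 'gep_prior12m': [100.0, 15.0, 70.0]},
    index=['life_all', 'life_ul', 'nl'])
gross_tp = pd.Series({'life_all': 900.0, 'life_ul': 300.0, 'nl': 150.0})

scr_op, op = op_scr(gep, gross_tp, ul_exp=5.0, bscr=400.0)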
|
fc1455e5ad7d4da92068b18b80a0ce929b5a9a50
| 29,166 |
def parse_voyager_sclk(sclk, planet=None):
"""Convert a Voyager clock string (FDS) to a numeric value.
Typically, a partition number is not specified for FDS counts. However, if
it is, it must be compatible with the planetary flyby. The partition number
is 2 for Jupiter and Saturn, 3 for Uranus, and 4 for Neptune.
If the planet is not specified (planet = None), then any partition value in
the range 2-4 is allowed and its value is ignored. If the planet is given as
input (5 for Jupiter, 6 for Saturn, 7 for Uranus, 8 for Neptune), then an
explicitly stated partition number must be compatible with the associated
planetary flyby.
"""
assert planet in (None, 5, 6, 7, 8), 'Invalid planet value: ' + str(planet)
# Check the partition number before ignoring it
parts = sclk.split('/')
if len(parts) > 2:
raise ValueError('Invalid FDS format, extraneous "/": ' + sclk)
if len(parts) == 2:
try:
partition = int(parts[0])
except ValueError:
raise ValueError('Partition number is not an integer: ' + sclk)
if planet is None:
if partition not in VOYAGER_PLANET_PARTITIONS.values():
raise ValueError('Partition number out of range 2-4: ' + sclk)
else:
required_partition = VOYAGER_PLANET_PARTITIONS[planet]
if partition != required_partition:
name = VOYAGER_PLANET_NAMES[planet]
raise ValueError('Partition number for %s flyby ' % name +
'must be %d: ' % required_partition + sclk)
sclk = parts[1]
# Separator can be '.' or ':'
if '.' in sclk:
parts = sclk.split('.')
elif ':' in sclk:
parts = sclk.split(':')
else:
parts = [sclk]
if len(parts) > 3:
raise ValueError('More than three fields in Voyager clock: ' + sclk)
# Make sure field are integers
ints = []
try:
for part in parts:
ints.append(int(part))
except ValueError:
raise ValueError('Voyager clock fields must be integers: ' + sclk)
# If we have just a single six- or seven-digit number, maybe the separator
# was omitted. This is how Voyager image names are handled.
if len(ints) == 1 and ints[0] >= 100000:
ints = [ints[0] // 100, ints[0] % 100]
# Append fields to make three
if len(ints) == 1:
ints.append(0)
if len(ints) == 2:
ints.append(1)
# Check fields for valid ranges
if ints[0] > 65535 or ints[0] < 0:
raise ValueError('Voyager clock "hours" out of range 0-65535: ' + sclk)
if ints[1] > 59 or ints[1] < 0:
raise ValueError('Voyager clock "minutes" out of range 0-59: ' + sclk)
if ints[2] > 800 or ints[2] < 1:
raise ValueError('Voyager clock "seconds" out of range 1-800: ' + sclk)
# Return in units of FDS hours
return ints[0] + (ints[1] + (ints[2]-1) / 800.) / 60.
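Two illustrative calls (assuming the VOYAGER_PLANET_PARTITIONS/VOYAGER_PLANET_NAMES constants defined elsewhere in this module):

print(parse_voyager_sclk('16368.32'))                 # 16368 + 32/60 FDS hours, about 16368.5333
print(parse_voyager_sclk('2/16368:32:01', planet=6))  # same value; partition 2 is valid for the Saturn flyby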
|
237695d43fe17af4f1d7fb704c01ab925099e663
| 29,167 |
def format_url(url):
"""
Formats url by adding 'http://' if necessary and deleting 'www.'
    :param url: url to article or domain
:return: formatted url e.g. the following urls:
'http://www.google.pl/', 'google.pl/', 'google.pl/', 'www.google.pl/',
'http://google.pl/', 'https://www.google.pl/'
will be all formatted to: http://google.pl/
"""
parsed_url = urlparse(url, 'http')
netloc = parsed_url.netloc or parsed_url.path
path = parsed_url.path if parsed_url.netloc else ''
netloc = netloc.replace('www.', '')
parsed_url = ParseResult('http', netloc, path, *parsed_url[3:])
if not validators.url(parsed_url.geturl()):
raise ValueError('Provided url=' + url + ' is not valid')
return parsed_url.geturl()
|
a9d99b3ad73efb2d79931e9f0d75b1ea557fc6f4
| 29,168 |
def append(arr, values, axis=None):
"""Append to the end of an array along axis (ravel first if None)
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
|
9654f761bd7437840e355abc7b881e3dbe6dd260
| 29,169 |
def volo_d4_448(pretrained=False, **kwargs):
""" VOLO-D4 model, Params: 193M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs)
model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args)
return model
|
c7e51cf1af050d79d5c31ef1b7aa107d6eac9c27
| 29,170 |
from functools import reduce
def rec_hasattr(obj, attr):
"""
Recursive hasattr.
:param obj:
The top-level object to check for attributes on
:param attr:
Dot delimited attribute name
Example::
rec_hasattr(obj, 'a.b.c')
"""
try:
reduce(getattr, attr.split('.'), obj)
except AttributeError:
return False
else:
return True
|
b1a9b12f54abb93202a5b41c950f761986307170
| 29,171 |
def find_svos(tokens):
"""
Extracts all the subject-verb objects in a list of tokens.
:param tokens: the parsed list.
:return: a list of the subject verb objects.
"""
svos = []
verbs = [tok for tok in tokens if tok.pos_ == "VERB" and tok.dep_ != "aux"]
for verb in verbs:
subs, verb_negated = get_all_subs(verb)
# hopefully there are subs, if not, don't examine this verb any longer
if subs:
verb, objs = get_all_objs(verb)
for sub in subs:
for obj in objs:
obj_negated = is_negated(obj)
svos.append((sub.lower_, "!" + verb.lower_
if verb_negated or obj_negated else verb.lower_, obj.lower_))
return svos
|
1ece330f828dcf54d1a010127b583327b24aa682
| 29,172 |
def def_axiom(arg1):
"""
    The def-axiom rule proves propositional tautology axioms.
    Because this proof requires a propositional logic decision procedure,
    ProofTerm.sorry is currently used instead.
"""
# Ts = analyze_type(arg1)
# if IntType in Ts:
# pt = refl(arg1).on_rhs(
# top_conv(rewr_conv('int_ite01')),
# bottom_conv(rewr_conv('eq_mean_true')),
# bottom_conv(integer.int_norm_eq()),
# bottom_conv(integer.int_neq_false_conv()),
# proplogic.norm_full()
# )
# pt = pt.symmetric()
# try:
# basic.load_theory('sat')
# pt_cnf = solve_cnf(pt.lhs)
# basic.load_theory('smt')
# return pt.equal_elim(pt_cnf)
# except:
# pass
# try:
# return solve_cnf(arg1)
# except:
return ProofTerm.sorry(Thm([], arg1))
|
ccf2b1a4ca57a96a09772d1f17c11ba345e62a31
| 29,173 |
def not_shiptoast_check(self, message):
"""Checks whether the message object is not in a shiptoast chat."""
if (message.channel.id in self.settings["shiptoast"]) or (message.channel.name in self.settings["shiptoast"]):
return False
else:
return True
|
b951ee6be9d9173065f340eda08e997b83964fe4
| 29,174 |
def jaccard_distance_loss(y_true, y_pred, smooth=100):
"""
Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
"""
intersection = tf.reduce_sum(tf.math.abs(y_true * y_pred), axis=-1)
sum_ = tf.reduce_sum(tf.math.abs(y_true) + tf.math.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return tf.reduce_sum((1 - jac) * smooth)
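A small smoke test with TensorFlow eager tensors (the values are arbitrary, and the module's tensorflow import is assumed):

import tensorflow as tf

y_true = tf.constant([[1.0, 1.0, 0.0]])
y_pred = tf.constant([[0.9, 0.8, 0.1]])
loss = jaccard_distance_loss(y_true, y_pred)   # scalar loss; 0 when the prediction exactly matches a binary target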
|
3ed1236856bc911210f19882a03c107f82450996
| 29,175 |
def unshare_document(token, docid, userid):
"""
Unshares a document from another user.
:param token:
The user JWT token.
:type token:
str
:param docid:
The DocID of the document.
:type docid:
str
:param userid:
The UserID of the user to be unshared from.
:type userid:
str
:raises grpc.RpcError:
Raised by the gRPC library to indicate non-OK-status RPC termination.
:returns:
The number of users unshared with.
:rtype:
int
"""
with client.connect_to_server_with_auth(token) as auth_conn:
client_stub = strongdoc_pb2_grpc.StrongDocServiceStub(auth_conn)
request = document_pb2.UnshareDocumentReq(docID=docid, userID=userid)
response = client_stub.UnshareDocument(request, timeout=constants.GRPC_TIMEOUT)
return response.count
|
c79479d93ee687dece0d60137d8837a17c306fca
| 29,177 |
import copy
import torch
def AlterNChannels(layer2alter_id, new_n_channels, old_model):
"""
Function to increase number of channels
Args:
layer2alter_id: layer to change
new_n_channels: number of channels for the altered layer
old_model: model before mutation
Returns:
Returns mutated model
"""
new_model_descriptor = copy.deepcopy(old_model['model_descriptor'])
old_pytorch_model = old_model['pytorch_model']
# Get layer where altering number of channels and also subsequent layers (as input of subsequent layer is changed)
layer2alter_conv = [layer for layer in new_model_descriptor['layers'] if layer['id'] == layer2alter_id][0]
layer2alter_bn = [layer for layer in new_model_descriptor['layers'] if layer['input'] == [layer2alter_id]][0]
layer2alter_acti = [layer for layer in new_model_descriptor['layers'] if layer['input'] == [layer2alter_bn['id']]][0]
subsequentlayer2alter = [layer for layer in new_model_descriptor['layers'] if
layer2alter_acti['id'] in layer['input']]
layer_type = layer2alter_conv['type']
# Check some constraints
    assert ((layer2alter_conv['type'] == 'conv') or (layer2alter_conv['type'] == 'sep')), 'Error: Layer has to be a conv or sepconv layer.'
    assert layer2alter_conv['params']['channels'] < new_n_channels, 'Error: Can only increase the number of channels.'
    assert len(subsequentlayer2alter) == 1, 'Error: more than one outgoing connection is not allowed'
    assert ((subsequentlayer2alter[0]['type'] == 'conv') or (
        subsequentlayer2alter[0]['type'] == 'dense')), 'Error: subsequent layer has to be a conv or dense layer'
# Make necessary changes to new descriptor
layer2alter_conv['params']['channels'] = new_n_channels
# For new architecture
layer2alter_bn['params']['in_channels'] = new_n_channels
old_id_conv = layer2alter_conv['id']
old_id_bn = layer2alter_bn['id']
old_id_sub = subsequentlayer2alter[0]['id']
new_id_conv = utils.GetUnusedID(new_model_descriptor)
new_id_bn = new_id_conv + 1
new_id_acti = new_id_conv + 2
new_id_sub = new_id_conv + 3
layer2alter_conv['id'] = new_id_conv
layer2alter_bn['id'] = new_id_bn
layer2alter_bn['input'] = [new_id_conv]
layer2alter_acti['id'] = new_id_acti
layer2alter_acti['input'] = [new_id_bn]
subsequentlayer2alter[0]['input'] = [new_id_acti]
subsequentlayer2alter[0]['id'] = new_id_sub
subsubsequentlayers = [layer for layer in new_model_descriptor['layers'] if old_id_sub in layer['input']]
# For new architecture
for layer in subsequentlayer2alter:
layer['params']['in_channels'] = new_n_channels
utils.ReplaceInput(subsubsequentlayers, old_id_sub, new_id_sub)
new_pytorch_model = ConvNet(new_model_descriptor)
new_pytorch_model.cuda()
new_pytorch_model = utils.InheritWeights(old_model['pytorch_model'], new_pytorch_model)
# Modify weights of changed layers
if layer_type == 'conv':
# Conv layer where number of channels have been changed
new_weights_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].weight)
new_bias_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].bias)
old_weights_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].weight)
old_bias_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].bias)
# Recalculate
new_weights_conv[0:old_weights_conv.shape[0], :, :, :] = nn.Parameter(old_weights_conv)
new_bias_conv[0:old_bias_conv.shape[0]] = nn.Parameter(old_bias_conv)
state_dict = {"weight": new_weights_conv.cuda(),
"bias": new_bias_conv.cuda()}
new_pytorch_model._modules[str(new_id_conv)].load_state_dict(state_dict)
elif layer_type == 'sep':
# Depthwise
old_weights_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].depthwise.weight)
old_bias_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].depthwise.bias)
state_dict = {"weight": nn.Parameter(old_weights_conv).cuda(),
"bias": nn.Parameter(old_bias_conv).cuda()}
new_pytorch_model._modules[str(new_id_conv)].depthwise.load_state_dict(state_dict)
# Pointwise
new_weights_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].pointwise.weight)
new_bias_conv = copy.deepcopy(new_pytorch_model._modules[str(new_id_conv)].pointwise.bias)
old_weights_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].pointwise.weight)
old_bias_conv = copy.deepcopy(old_pytorch_model._modules[str(old_id_conv)].pointwise.bias)
# Recalculate
new_weights_conv[0:old_weights_conv.shape[0], :, :, :] = nn.Parameter(old_weights_conv)
new_bias_conv[0:old_bias_conv.shape[0]] = nn.Parameter(old_bias_conv)
state_dict = {"weight": new_weights_conv.cuda(),
"bias": new_bias_conv.cuda()}
new_pytorch_model._modules[str(new_id_conv)].pointwise.load_state_dict(state_dict)
# Copy old weights for BN layer
new_weights_bn = []
new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].weight))
new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].bias))
new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].running_mean))
new_weights_bn.append(copy.deepcopy(new_pytorch_model._modules[str(new_id_bn)].running_var))
old_weights_bn = []
old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].weight))
old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].bias))
old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].running_mean))
old_weights_bn.append(copy.deepcopy(old_pytorch_model._modules[str(old_id_bn)].running_var))
for weight_idx, weight in enumerate(new_weights_bn):
if weight_idx < 2:
new_weights_bn[weight_idx][0:old_weights_bn[weight_idx].shape[0]] = nn.Parameter(
old_weights_bn[weight_idx])
else:
new_weights_bn[weight_idx][0:old_weights_bn[weight_idx].shape[0]] = old_weights_bn[weight_idx]
state_dict = {"weight": new_weights_bn[0].cuda(),
"bias": new_weights_bn[1].cuda(),
"running_mean": new_weights_bn[2].cuda(),
"running_var": new_weights_bn[3].cuda()}
new_pytorch_model._modules[str(new_id_bn)].load_state_dict(state_dict)
new_weights_sub = copy.deepcopy(new_pytorch_model._modules[str(new_id_sub)].weight)
old_weights_sub = copy.deepcopy(old_pytorch_model._modules[str(old_id_sub)].weight)
old_bias_sub = copy.deepcopy(old_pytorch_model._modules[str(old_id_sub)].bias)
# Copy old weights
new_weights_sub[:, 0:old_weights_sub.shape[1], :, :] = old_weights_sub
# Fill up new channels with 0's
new_weights_sub[:, old_weights_sub.shape[1]:, :, :] = torch.from_numpy(
np.zeros(shape=new_weights_sub[:, old_weights_sub.shape[1]:, :, :].shape))
new_bias_sub = copy.deepcopy(old_bias_sub)
state_dict = {"weight": nn.Parameter(new_weights_sub.cuda()),
"bias": nn.Parameter(new_bias_sub.cuda())}
new_pytorch_model._modules[str(new_id_sub)].load_state_dict(state_dict)
new_model = {'pytorch_model': new_pytorch_model,
'model_descriptor': new_model_descriptor,
'topo_ordering': new_pytorch_model.topo_ordering}
return new_model
|
a6e02739eddd5c1de572f303b580bcd9c72a272a
| 29,178 |
def generate_bins(bins, values=None):
"""Compute bin edges for numpy.histogram based on values and a requested bin parameters
Unlike `range`, the largest value is included within the range of the last, largest value,
so generate_bins(N) with produce a sequence with length N+1
Arguments:
bins (int or 2-tuple of floats or sequence of floats) s or the first pair of bin edges
>>> generate_bins(0, [])
[0]
>>> generate_bins(3, [])
[0, 1, 2, 3]
>>> generate_bins(0)
[0]
>>> generate_bins(10)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> generate_bins(10, range(21))
[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]
>>> generate_bins((0, 3), range(21))
[0, 3, 6, 9, 12, 15, 18, 21]
"""
if isinstance(bins, int):
bins = (bins,)
if isinstance(bins, float):
bins = (0, bins)
if not len(bins) in (1, 2):
return bins
if values is None or not hasattr(values, '__iter__') or not any(values) or not hasattr(values, '__len__') or len(values) < 1:
values = [0]
value_min, value_max = pd.np.min(values), pd.np.max(values)
value_range = value_max - value_min
if len(bins) == 1:
if not value_range:
return range(int(bins[0]) + 1)
bins = (0, value_range / float(bins[0]))
if len(bins) == 2:
if not value_range:
return bins
binwidth = ((bins[1] - bins[0]) or 1)
bin0 = bins[0] or pd.np.min(values)
if (bin0 / value_range) <= .3:
bin0 = 0
numbins = int(value_range / float(binwidth))
bins = list(pd.np.arange(numbins + 1) * binwidth + bin0)
else:
binwidth = pd.np.min(pd.np.diff(bins)) or pd.np.mean(pd.np.diff(bins)) or 1.
bins = list(bins)
while bins[-1] < value_max:
bins.append(bins[-1] + binwidth)
return bins
|
2d448746658193b8dd6c3ac3ef27418b37116a93
| 29,179 |
def makeRollAnswerStr( roll_res, mention_str ):
"""Formats an answer string depending on the roll result. If provided with an invalid roll result, returns 'None'."""
answer = None
if roll_res == None:
answer = "Invalid dice expression !"
elif len(roll_res)==2: #either threshold or success roll
res,aux = roll_res
if isinstance(res,bool): #threshold roll
            # Careful: bool is a subclass of int in Python
if res:
answer = "{} succeeded ! (Roll value was: `{}`)".format(mention_str,aux)
else:
answer = "{} failed ! (Roll value was: `{}`)".format(mention_str,aux)
elif isinstance(res,int): #success roll
answer = "{} succeeded `{}` times ! (Number of attempts: `{}`)".format(mention_str,res,aux)
elif len(roll_res)==3: #default roll
res,minVal,maxVal = roll_res
answer = "{} rolled a `{}`! (Possible values between `{}` and `{}`)".format(mention_str,res,minVal,maxVal)
if answer == None:
loc_log.warning("makeRollAnswerStr: The 'roll_res' argument '{}' is invalid !".format(roll_res))
return answer
|
940f43b5592ff0da6d941bcb13b100c8fb2a590e
| 29,180 |
import math
def ECSPower(min, max, size):
"""
    The mains water temperature is modelled as a sinusoid with a one-year period.
    This function is fully aligned with a weather file that starts on January 1st but may span several years.
    min : minimum temperature at which mains water is injected into the tank
    max : maximum temperature at which mains water is injected into the tank
"""
T_water=np.zeros(size)
    ## period (one year)
w = 2*math.pi/npy
for i in range(size):
        # step index within the year
siy = i - npy*(i//npy)
T_water[i]= 0.5 * ( (min-max)* math.cos(w*siy) + max + min )
    # The demand is expressed per day, i.e. 24*3600 seconds,
    # so divide by 24*3600 to convert from J to W (Cpf is expressed in J/kg/K)
return Volume_ballon*Npers*(Tballon-T_water)*Cpf/(24*3600)
|
c8f2422bfc066fc2e87caa3d2d87b07d0f1e4335
| 29,181 |
from typing import Dict
from typing import Any
def __create_notification(title: str, content: str) -> Dict[str, Any]:
"""
Creates a notification "object" from the given title and content.
    :param title: The title of the notification.
    :param content: The content of the notification.
    :returns: A dictionary representing a notification "object".
"""
return {"title": title, "content": content}
|
484abcc2afcb8f726811e36516572bc5c302a415
| 29,182 |
def readme():
"""Read and patch README."""
readme_text = read('README.rst')
# PyPI does not accept :class: references.
return readme_text.replace(':class:`base64io.Base64IO`', '``base64io.Base64IO``')
|
bad97b377022ec15e0dc0c0c3bcb984924dce216
| 29,183 |
def generator_dcgan(noise_dim, img_source_dim,img_dest_dim, bn_mode,deterministic,pureGAN,inject_noise,wd, model_name="generator_dcgan"):
"""DCGAN generator based on Upsampling and Conv2D
Args:
noise_dim: Dimension of the noise input
img_dim: dimension of the image output
bn_mode: keras batchnorm mode
model_name: model name (default: {"generator_upsampling"})
dset: dataset (default: {"mnist"})
Returns:
keras model
"""
s = img_source_dim[1]
f = 512
# shp = np.expand_dims(img_dim[1:],1) # to make shp= (None, 1, 28, 28) but is not working
start_dim = int(s / 4)
nb_upconv = 2
nb_filters = 64
if K.image_dim_ordering() == "th":
bn_axis = 1
input_channels = img_source_dim[0]
output_channels = img_dest_dim[0]
reshape_shape = (input_channels, s, s)
shp=reshape_shape
else:
bn_axis = -1
input_channels = img_source_dim[-1]
output_channels = img_dest_dim[-1]
reshape_shape = (s, s, input_channels)
shp=reshape_shape
gen_noise_input = Input(shape=noise_dim, name="generator_input")
gen_image_input = Input(shape=shp, name="generator_image_input")
start_dim = int(s / 16)
n_fc_filters = 16
x = Dense(n_fc_filters * 16 * 16, input_dim=noise_dim, weight_norm=True,init="he_normal")(gen_noise_input) #WN = True in AFFINE
x = Activation("relu")(x)
# x = Dense(n_fc_filters * 16 * 16, input_dim=noise_dim)(x)
# x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
# x = Activation("relu")(x)
x = Reshape((n_fc_filters,16,16))(x)
# Upscaling blocks: Upsampling2D->Conv2D->ReLU->BN->Conv2D->ReLU
for i in range(nb_upconv):
x = UpSampling2D(size=(2, 2))(x)
nb_filters = int(f / (2 ** (i + 1)))
x = Convolution2D(nb_filters, 3, 3, border_mode="same",weight_norm=True, kernel_initializer="he_normal")(x)
# x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
x = Activation("relu")(x)
x = Convolution2D(nb_filters, 3, 3, border_mode="same",weight_norm=True, kernel_initializer="he_normal")(x)
x = Activation("relu")(x)
# Last Conv to get the output image
x = Convolution2D(output_channels, 3, 3, name="gen_conv2d_final",
border_mode="same", activation='tanh', kernel_initializer="he_normal")(x) #W_constraint=unitnorm()
generator_model = Model(input=[gen_noise_input,gen_image_input], output=[x], name=model_name)
visualize_model(generator_model)
return generator_model
|
9d8d481fc9688b30fd3b9ffdb5914a61291b56b7
| 29,184 |
def emg21(peak_index, x_pos, amp, init_pars=pars_dict,
vary_shape_pars=True, index_first_peak=None):
"""
Hyper-EMG(2,1) lmfit model (single-peak fit model with two exponential tails
on the left and one exponential tail on the right)
Parameters
----------
peak_index : int
Index of peak to fit.
x_pos : float
Initial guess of peak centroid.
amp : float
Initial guess of peak amplitude.
init_pars : dict
Initial parameters for fit ('amp' and 'mu' parameters in `init_pars`
dictionary are overwritten by the given `amp` and `x_pos` arguments)
vary_shape_pars : bool
Whether to vary or fix peak shape parameters (i.e. sigma, theta,
eta's and tau's).
index_first_peak : int
Index of the first peak to be fit in a multi-peak-fit. Only use this
during peak shape determination to enforce common shape parameters
for all peaks to be fitted. (For a regular fit with
``vary_shape_pars = False`` this is irrelevant.)
Returns
-------
:class:`lmfit.model.Model`
`lmfit` model object
"""
# Define model function
def emg21(x, amp, mu, sigma, theta, eta_m1,eta_m2,tau_m1,tau_m2,tau_p1):
return amp*h_emg(x, mu, sigma, theta, (eta_m1,eta_m2),(tau_m1,tau_m2),(1,),(tau_p1,)) # from emg_funcs.py
pref = 'p{0}_'.format(peak_index) # set prefix for respective peak (e.g. 'p0' for peak with index 0)
model = fit.Model(emg21, prefix = pref, nan_policy='propagate')
# Add parameters bounds or restrictions and define starting values
model.set_param_hint(pref+'amp', value=amp, min=1e-20)
model.set_param_hint(pref+'mu', value=x_pos, min=x_pos*(1-rel_var_mus), max=x_pos*(1+rel_var_mus))
model.set_param_hint(pref+'sigma', value= init_pars['sigma'], min=0, max=init_pars['sigma']+0.005, vary=vary_shape_pars)
model.set_param_hint(pref+'theta', value= init_pars['theta'], min=0, max=1, vary=vary_shape_pars)
model.set_param_hint(pref+'eta_m1', value= init_pars['eta_m1'], min=0, max=1, vary=vary_shape_pars)
model.set_param_hint(pref+'eta_m2', value= init_pars['eta_m2'], min=0, max=1, expr='1-'+pref+'eta_m1') # ensures normalization of eta_m's
model.set_param_hint(pref+'tau_m1', value= init_pars['tau_m1'], min=1e-12, max=upper_bound_taus, vary=vary_shape_pars)
model.set_param_hint(pref+'tau_m2', value= init_pars['tau_m2'], min=1e-12, max=upper_bound_taus, vary=vary_shape_pars)
model.set_param_hint(pref+'tau_p1', value= init_pars['tau_p1'], min=1e-12, max=upper_bound_taus, vary=vary_shape_pars)
# Enfore common shape parameters for all peaks
# (only needed during peak shape calibration)
if index_first_peak != None and (peak_index != index_first_peak):
first_pref = 'p{0}_'.format(index_first_peak)
model.set_param_hint(pref+'sigma', value= init_pars['sigma'], min=0, max=init_pars['sigma']+0.005, expr=first_pref+'sigma')
model.set_param_hint(pref+'theta', value= init_pars['theta'], min=0, max=1, expr=first_pref+'theta')
model.set_param_hint(pref+'eta_m1', value= init_pars['eta_m1'], min=0, max=1, expr=first_pref+'eta_m1' )
model.set_param_hint(pref+'eta_m2', value= init_pars['eta_m2'], min=0, max=1, expr='1-'+pref+'eta_m1') # ensures normalization of eta_m's
model.set_param_hint(pref+'tau_m1', value= init_pars['tau_m1'], min=1e-12, max=upper_bound_taus, expr=first_pref+'tau_m1')
model.set_param_hint(pref+'tau_m2', value= init_pars['tau_m2'], min=1e-12, max=upper_bound_taus, expr=first_pref+'tau_m2')
model.set_param_hint(pref+'tau_p1', value= init_pars['tau_p1'], min=1e-12, max=upper_bound_taus, expr=first_pref+'tau_p1')
return model
|
9e35deb35806aa1da1c70080a0eb0e5af022fe53
| 29,186 |
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor,
summary):
"""Wait for job and retry any tasks that fail.
Stops retrying an individual task when: it succeeds, is canceled, or has been
retried "retries" times.
This function exits when there are no tasks running and there are no tasks
eligible to be retried.
Args:
provider: job service provider
job_id: a single job ID (string) to wait for
poll_interval: integer seconds to wait between iterations
retries: number of retries
job_descriptor: job descriptor used to originally submit job
summary: whether to output summary messages
Returns:
Empty list if there was no error,
a list containing an error message from a failed task otherwise.
"""
while True:
formatted_tasks = []
tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id])
running_tasks = set()
completed_tasks = set()
canceled_tasks = set()
fully_failed_tasks = set()
task_fail_count = dict()
# This is an arbitrary task that is either fully failed or canceled (with
# preference for the former).
message_task = None
task_dict = dict()
for t in tasks:
task_id = t.get_field('task-id')
if task_id is not None:
task_id = int(task_id)
task_dict[task_id] = t
status = t.get_field('task-status')
if status == 'FAILURE':
# Could compute this from task-attempt as well.
task_fail_count[task_id] = task_fail_count.get(task_id, 0) + 1
if task_fail_count[task_id] > retries:
fully_failed_tasks.add(task_id)
message_task = t
elif status == 'CANCELED':
canceled_tasks.add(task_id)
if not message_task:
message_task = t
elif status == 'SUCCESS':
completed_tasks.add(task_id)
elif status == 'RUNNING':
running_tasks.add(task_id)
if summary:
formatted_tasks.append(
output_formatter.prepare_row(t, full=False, summary=True))
if summary:
formatter = output_formatter.TextOutput(full=False)
formatter.prepare_and_print_table(formatted_tasks, summary)
retry_tasks = (
set(task_fail_count).difference(fully_failed_tasks)
.difference(running_tasks).difference(completed_tasks)
.difference(canceled_tasks))
# job completed.
if not retry_tasks and not running_tasks:
# If there are any fully failed tasks, return the completion message of an
# arbitrary one.
# If not, but there are canceled tasks, return the completion message of
# an arbitrary one.
if message_task:
return [provider.get_tasks_completion_messages([message_task])]
# Otherwise successful completion.
return []
for task_id in retry_tasks:
identifier = '{}.{}'.format(job_id, task_id) if task_id else job_id
print(' {} (attempt {}) failed. Retrying.'.format(
identifier, task_fail_count[task_id]))
msg = task_dict[task_id].get_field('status-message')
print(' Failure message: ' + msg)
_retry_task(provider, job_descriptor, task_id,
task_fail_count[task_id] + 1)
SLEEP_FUNCTION(poll_interval)
|
fc0f78d1ceb9d4d26dbf7b92fedc2de33a4ac4e9
| 29,187 |
def summarize_2_dual_3(package_list):
"""
Given list of packages, return counts of (py3-only, dual-support, py2-only)
"""
py3 = 0
dual = 0
py2 = 0
for pkg in package_list:
if pkg['status'] == 'py3-only':
py3 += 1
elif pkg['status'] in PY2_STATUSES:
dual += 1
else:
py2 += 1
return py3, dual, py2
|
6a863b456a71fd51e1ac2744424a42495413778f
| 29,188 |
def isPTSF(p, T=[]):
"""
>>> from common.production import Production
>>> p = Production(['A'], [['\\"a\\"', '\\"b\\"'],['\\"cde\\"']])
>>> isPTSF(p)
True
>>> p = Production(['A'], [['\\"a\\"', '\\"b\\"'],['\\"cde\\"']])
>>> isPTSF(p, ['a', 'b', 'c', 'd', 'e'])
True
>>> p = Production(['A'], [['\\"a\\"', '\\"b\\"'],['\\"cde\\"']])
>>> isPTSF(p, ['a'])
False
>>> p = Production(['A'], [['a', 'b'],['Ade']])
>>> isPTSF(p)
False
"""
for opt in p.right:
for symbol in opt:
if not isLiteralValue(symbol, T):
return False
return True
|
13f1ed36bb93035490fde33dad3840fe8b98c263
| 29,191 |
def new_getvalue( state, name, p):
"""
Called every time a node value is used in an expression.
It will override the value for the current step only.
Returns random values for the node states
"""
global TARGETS
value = util.default_get_value( state, name, p )
if name in TARGETS:
# pick at random from True, False and original value
return choice( [True, False, value] )
else:
return value
|
30b6abacaf478936663b94c45fc2bb3951706299
| 29,192 |
def informe_ministerios():
"""
    List of people
"""
check_edit_or_admin()
roles = db.session.query(Rol).filter(Rol.tipo_rol == 'M')\
.join(relacion_miembros_roles,
relacion_miembros_roles.c.id_rol ==
Rol.id)\
.join(Miembro,
Miembro.id ==
relacion_miembros_roles.c.id_miembro)\
.add_columns(
Miembro.id,
Rol.nombre_rol)
query = db.session.query(Miembro)\
.outerjoin(relacion_miembros_roles,
Miembro.id ==
relacion_miembros_roles.c.id_miembro)\
.outerjoin(Rol,
Rol.id ==
relacion_miembros_roles.c.id_rol)\
.outerjoin(Direccion,
Miembro.id_direccion ==
Direccion.id)\
.outerjoin(TipoMiembro,
Miembro.id_tipomiembro ==
TipoMiembro.id)\
.outerjoin(EstadoCivil,
Miembro.id_estadocivil ==
EstadoCivil.id)\
.filter(Rol.tipo_rol == 'M')\
.add_columns(
Miembro.id,
Miembro.fullname,
Miembro.email,
Miembro.telefono_fijo,
Miembro.telefono_movil,
EstadoCivil.nombre_estado,
TipoMiembro.nombre_tipomiembro,
Direccion.tipo_via,
Direccion.nombre_via,
Direccion.nro_via,
Direccion.portalescalotros_via,
Direccion.cp_via,
Direccion.ciudad_via,
Direccion.provincia_via,
Direccion.pais_via)
query_miembros = query.all()
return render_template('informes/informe_ministerios.html',
informes=query_miembros, roles=roles)
|
3dddb4756a092faaa8b6f191f93e22787fb7e38d
| 29,194 |
def merge_regions_and_departments(regions, departments):
"""Merge regions and departments in one DataFrame.
The columns in the final DataFrame should be:
['code_reg', 'name_reg', 'code_dep', 'name_dep']
"""
return pd.merge(left=regions[["code", "name"]],
right=departments[['region_code',
"code", "name"]],
left_on='code',
right_on='region_code', suffixes=('_reg', '_dep'),
how='left').drop('region_code', axis=1)
|
0852df4d8ace31a74397ad88140336dbdf9488d2
| 29,195 |
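A small illustrative call of the merge above, with one hypothetical region/department pair; assumes pandas is installed and the function above is in scope.

import pandas as pd

regions = pd.DataFrame({"code": ["76"], "name": ["Occitanie"]})
departments = pd.DataFrame({"region_code": ["76"], "code": ["31"],
                            "name": ["Haute-Garonne"]})

merged = merge_regions_and_departments(regions, departments)
print(merged.columns.tolist())  # ['code_reg', 'name_reg', 'code_dep', 'name_dep']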
import json
def updateResourceJsons(swagger,examplesDict,dirName):
"""
Update the Resource JSON file to include examples in other folder
"""
try:
# Iterate through all resources in the output folder
for id in range(len(swagger['tags'])):
resourceName = swagger['tags'][id]['name']
if resourceName == 'CapabilityStatement':
continue
# create swagger subset which was initially created in 'AnnotateFiles.py'
with open('./output/'+resourceName+'.json',encoding='utf8') as f:
swaggerSubset = json.load(f)
resourceExamples = {}
# Iterate through all examples for the resource
for example in examplesDict[resourceName]:
with open(dirName+"/"+example,encoding='utf8') as f:
exampleContents = json.load(f)
# Add the example keyed by the file name
resourceExamples[example] = {"value":exampleContents}
swaggerSubset['paths']['/'+resourceName]['post']['requestBody']['content']['application/fhir+json']['examples'] = resourceExamples
swagger['paths']['/'+resourceName]['post']['requestBody']['content']['application/fhir+json']['examples'] = resourceExamples
# Save the file with 'w' to overwrite current outputted file
with open('./output/'+resourceName+'.json','w',encoding='utf8') as f:
json.dump(swaggerSubset,f)
# Return status
with open('./output/openapi3.json','w',encoding='utf8') as f:
json.dump(swagger,f)
return "SUCCESS"
except Exception as e:
print("Error duing saving")
print(e)
return "ERROR"
|
3d9a7a31e3875bb7c56d8dfbd26ca5b73039101b
| 29,196 |
def sign(x):
"""Sign function.
    :return: -1 if x < 0, else 1
"""
if x < 0: return -1
else: return 1
|
aae4fcf8fcfafca63593e908c264c08107640ec6
| 29,197 |
def set_default_dataseg(*args):
"""
set_default_dataseg(ds_sel)
Set default value of DS register for all segments.
@param ds_sel (C++: sel_t)
"""
return _ida_segregs.set_default_dataseg(*args)
|
e1f988537cb9eb0518fe5467d07d5487f3f8c440
| 29,198 |
def get_sms_history(key: str):
"""
Get SMS history.
:param str key: Authentication key.
:return: List of SMSHistoryItems.
"""
session = get_session(key)
url = f"{SITE_BASE_URL}/index.php?page=10&lang=en"
response = session.get(url)
pages = bs(response.text, "html.parser").find_all("span", {"class": "page_number"})
items = _parse_sms_history_items(response)
if len(pages) != 0:
del pages[0]
for page in pages:
items = items + _parse_sms_history_items(
session.post(url, {"cur_page": page.text})
)
return items
|
ed6f4a4a63d90fc91e25baa92179c220783f78b2
| 29,199 |
def core_rotd(sym_factor, flux_file_name, stoich):
""" Writes the string that defines the `Core` section for a
variational reaction-coordinate transition-state theory model of a
transition state for a MESS input file by
        formatting input information into strings and filling a Mako template.
:param sym_factor: symmetry factor of transition state
:type sym_factor: float
        :param flux_file_name: name of the file containing the flux data
:type flux_file_name: str
:param stoich: combined stoichiometry of dissociation species 1 and 2
:type stoich: str
:rtype: str
"""
# Create dictionary to fill template
core_keys = {
'sym_factor': sym_factor,
'flux_file_name': flux_file_name,
'stoich': stoich
}
return build_mako_str(
template_file_name='core_rotd.mako',
template_src_path=SPEC_INFO_PATH,
template_keys=core_keys)
|
6213ec9da79340738142ccd76ffbab3d30470d36
| 29,200 |
def last_gen(genobj):
"""
    Iterate over a generator object and return its last element.
    :param genobj: generator object to exhaust
    :return: the last element yielded by the generator
"""
for i in genobj:
last_e = i
return last_e
|
04e7cc57bf6406832cacaa04aa01b2ec877307df
| 29,201 |
def get_section_name_mapping(lattice):
"""."""
lat = lattice[:]
section_map = ['' for i in range(len(lat))]
# find where the nomenclature starts counting and shift the lattice:
start = _pyaccel.lattice.find_indices(lat, 'fam_name', 'start')[0]
b1 = _pyaccel.lattice.find_indices(lat, 'fam_name', 'B')
if b1[0] > start:
ind_shift = (b1[-1] + 1) # Next element of last b1
else:
for i in b1[::-1]: # except there is a b1 before start
if i < start:
ind_shift = i + 1
break
lat = _pyaccel.lattice.shift(lat, ind_shift)
# Find indices important to define the change of the names of
# the subsections
b = _pyaccel.lattice.find_indices(lat, 'fam_name', 'B')
qf = _pyaccel.lattice.find_indices(lat, 'fam_name', 'QF')
b_nrsegs = len(b)//50
# divide the ring in 50 sectors defined by the b1 dipoles:
Sects = []
ini = 0
for i in range(len(b)//b_nrsegs):
fim = b[(i+1)*b_nrsegs-1] + 1
Sects.append(list(range(ini, fim)))
ini = fim
# Names of the subsections:
sub_secs = ['U', 'D']
for i, sec in enumerate(Sects, 1):
# conditions that define change in subsection name:
# define changes to ''
sec_b = [x for x in b if sec[0] <= x <= sec[-1]]
relev_inds = [sec_b[-1]]
# define changes to '' and D
sec_qf = [x for x in qf if sec[0] <= x <= sec[-1]]
relev_inds += [sec_qf[-1]]
relev_inds.sort()
# fill the section_map variable
ref = 0
for j in sec:
section_map[(ind_shift+j) % len(lat)] = "{0:02d}".format(i)
section_map[(ind_shift+j) % len(lat)] += sub_secs[ref]
if j >= relev_inds[ref]:
ref += 1
return section_map
|
52a8352f6e8747ee6f6f9a1c85b34f551fd04dad
| 29,202 |
import math
def from_quaternion(quaternions, name=None):
"""Converts quaternions to Euler angles.
Args:
quaternions: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
represents a normalized quaternion.
name: A name for this op that defaults to "euler_from_quaternion".
Returns:
A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
the three Euler angles.
"""
def general_case(r00, r10, r21, r22, r20, eps_addition):
"""Handles the general case."""
theta_y = -tf.asin(r20)
sign_cos_theta_y = safe_ops.nonzero_sign(tf.cos(theta_y))
r00 = safe_ops.nonzero_sign(r00) * eps_addition + r00
r22 = safe_ops.nonzero_sign(r22) * eps_addition + r22
theta_z = tf.atan2(r10 * sign_cos_theta_y, r00 * sign_cos_theta_y)
theta_x = tf.atan2(r21 * sign_cos_theta_y, r22 * sign_cos_theta_y)
return tf.stack((theta_x, theta_y, theta_z), axis=-1)
def gimbal_lock(r01, r02, r20, eps_addition):
"""Handles Gimbal locks."""
sign_r20 = safe_ops.nonzero_sign(r20)
r02 = safe_ops.nonzero_sign(r02) * eps_addition + r02
theta_x = tf.atan2(-sign_r20 * r01, -sign_r20 * r02)
theta_y = -sign_r20 * tf.constant(math.pi / 2.0, dtype=r20.dtype)
theta_z = tf.zeros_like(theta_x)
angles = tf.stack((theta_x, theta_y, theta_z), axis=-1)
return angles
with tf.compat.v1.name_scope(name, "euler_from_quaternion", [quaternions]):
quaternions = tf.convert_to_tensor(value=quaternions)
shape.check_static(
tensor=quaternions,
tensor_name="quaternions",
has_dim_equals=(-1, 4))
x, y, z, w = tf.unstack(quaternions, axis=-1)
tx = safe_ops.safe_shrink(2.0 * x, -2.0, 2.0, True)
ty = safe_ops.safe_shrink(2.0 * y, -2.0, 2.0, True)
tz = safe_ops.safe_shrink(2.0 * z, -2.0, 2.0, True)
twx = tx * w
twy = ty * w
twz = tz * w
txx = tx * x
txy = ty * x
txz = tz * x
tyy = ty * y
tyz = tz * y
tzz = tz * z
# The following is clipped due to numerical instabilities that can take some
    # entries outside the [-1;1] range.
r00 = safe_ops.safe_shrink(1.0 - (tyy + tzz), -1.0, 1.0, True)
r10 = safe_ops.safe_shrink(txy + twz, -1.0, 1.0, True)
r21 = safe_ops.safe_shrink(tyz + twx, -1.0, 1.0, True)
r22 = safe_ops.safe_shrink(1.0 - (txx + tyy), -1.0, 1.0, True)
r20 = safe_ops.safe_shrink(txz - twy, -1.0, 1.0, True)
r01 = safe_ops.safe_shrink(txy - twz, -1.0, 1.0, True)
r02 = safe_ops.safe_shrink(txz + twy, -1.0, 1.0, True)
eps_addition = asserts.select_eps_for_addition(quaternions.dtype)
general_solution = general_case(r00, r10, r21, r22, r20, eps_addition)
gimbal_solution = gimbal_lock(r01, r02, r20, eps_addition)
# The general solution is unstable close to the Gimbal lock, and the gimbal
    # solution is not too far off in these cases.
is_gimbal = tf.less(tf.abs(tf.abs(r20) - 1.0), 1.0e-6)
gimbal_mask = tf.stack((is_gimbal, is_gimbal, is_gimbal), axis=-1)
return tf.where(gimbal_mask, gimbal_solution, general_solution)
|
4f00f734599699aaadf5c66a1f8e1457321bd689
| 29,203 |
def is_polygonal(n, num):
"""
Predicate for if num is a n-gonal number.
Works for all n >= 3 and num >= 1.
"""
if n < 3 or num < 1:
return False
t = int((sqrt(8*num*(n-2) + (n-4)**2) + (n-4)) / (2 * (n-2)))
return poly(n, t) == num
|
f3abde644544c05da17faeb9891916428b265602
| 29,204 |
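The snippet above relies on `sqrt` and a `poly(n, t)` helper that are not shown; assuming the usual closed form for the t-th n-gonal number, a compatible sketch would be:

from math import sqrt

def poly(n, t):
    # t-th n-gonal number under the standard formula (hypothetical helper)
    return ((n - 2) * t * t - (n - 4) * t) // 2

print(is_polygonal(3, 10))  # True:  10 is the 4th triangular number
print(is_polygonal(5, 22))  # True:  22 is the 4th pentagonal number
print(is_polygonal(4, 10))  # False: 10 is not a perfect square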
def map2pix(geoTransform, x, y):
""" transform map coordinates to local image coordinates
Parameters
----------
geoTransform : tuple, size=(6,1)
georeference transform of an image.
x : np.array, size=(m), ndim={1,2,3}, dtype=float
horizontal map coordinate.
y : np.array, size=(m), ndim={1,2,3}, dtype=float
vertical map coordinate.
Returns
-------
i : np.array, ndim={1,2,3}, dtype=float
row coordinate(s) in local image space
j : np.array, ndim={1,2,3}, dtype=float
column coordinate(s) in local image space
See Also
--------
pix2map
Notes
-----
Two different coordinate system are used here:
.. code-block:: text
indexing | indexing ^ y
system 'ij'| system 'xy' |
| |
| i | x
--------+--------> --------+-------->
| |
| |
image | j map |
based v based |
"""
if isinstance(x, np.ndarray):
assert x.shape==y.shape, ('arrays should be of the same size')
assert isinstance(geoTransform, tuple), ('geoTransform should be a tuple')
# # offset the center of the pixel
# x -= geoTransform[1] / 2.0
# y -= geoTransform[5] / 2.0
# ^- this messes-up python with its pointers....
j = x - geoTransform[0]
i = y - geoTransform[3]
if geoTransform[2] == 0:
j = j / geoTransform[1]
else:
j = (j / geoTransform[1]
+ i / geoTransform[2])
if geoTransform[4] == 0:
i = i / geoTransform[5]
else:
i = (j / geoTransform[4]
+ i / geoTransform[5])
return i, j
|
bd044a4ec6d1b97f304086c21d18485b70afbee2
| 29,205 |
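A worked example for `map2pix` above with a hypothetical north-up geotransform (no rotation terms); assumes numpy and the function above are in scope.

import numpy as np

# (upper-left x, pixel width, row rotation, upper-left y, column rotation, pixel height)
geoTransform = (100000.0, 10.0, 0.0, 500000.0, 0.0, -10.0)

i, j = map2pix(geoTransform, np.array([100050.0]), np.array([499980.0]))
print(i, j)  # [2.] [5.]  -> 2 rows down, 5 columns right of the upper-left corner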
def checkmarkers(tubemarker, tubechecker):
"""Check for required markers in each tube
The tube-specific markers that are to be merged are desceribed in constants.py
Args:
tubemarker: required markers for that tube
tubechecker: markers in the given tube that needs to be validated
Returns:
True if all the required markers are found, False otherwise
"""
check = True
marker_list = list(map(map_elements, tubemarker))
tubechecker = list(map(map_elements, tubechecker))
for m in marker_list:
if m not in tubechecker:
if any(xs in m for xs in EMPTY_MARKER_NAMES):
continue
else:
return False
return check
|
5004a9158db93164dbcf024d6e06d832bf35cf30
| 29,207 |
def _in_Kexp(z):
""" Returns true if z is in the exponential cone """
alpha, beta, delta = z
if ((beta > 0) and (delta > 0)
and (np.log(delta) >= np.log(beta) + alpha / beta)) \
or ((alpha <= 0) and (np.abs(beta) < 1e-12) and (delta >= 0)):
return True
else:
return False
|
19e57dbb10e420ead5ce02e9801cd1e087f3afad
| 29,208 |
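Two quick membership checks for the exponential cone test above; assumes numpy and the function above are in scope.

import numpy as np

# Interior point: beta, delta > 0 and log(delta) >= log(beta) + alpha / beta
print(_in_Kexp((1.0, 1.0, np.e)))  # True,  since e >= 1 * exp(1 / 1)
print(_in_Kexp((1.0, 1.0, 1.0)))   # False, since 1 < exp(1)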
def split_blocks(bytestring, block_size):
"""Splits bytestring in block_size-sized blocks.
    Raises an error if len(bytestring) % block_size != 0.
"""
if block_size == 1:
return map(b_chr, bytearray(bytestring))
rest_size = len(bytestring) % block_size
if rest_size:
raise ValueError("Input 'bytestring' must be a multiple of "
"block_size / segment_size (CFB mode) in length")
block_count = len(bytestring) // block_size
return (
bytestring[i * block_size:((i + 1) * block_size)]
for i in range(block_count))
|
f85caf419de35c75d5d920d531519b2f641cc7b3
| 29,209 |
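A short usage sketch for `split_blocks` above; the `block_size == 1` branch depends on a `b_chr` helper that is not shown, so the sketch sticks to larger blocks.

blocks = list(split_blocks(b"0123456789ab", 4))
print(blocks)  # [b'0123', b'4567', b'89ab']

# A length that is not a multiple of block_size raises ValueError:
# list(split_blocks(b"0123456789", 4))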
def put_newest_oldest_files(f, author, path_earliest_latest, n_files, is_newest):
"""Write a report of files that were least recently changed by `author` (`is_newest` is False),
or was first most least recently changed by `author` in current revision.
f: file handle to write to
path_earliest_latest: {path: (earliest, latest)}
n_files: Max number of files to write
is_newest: Write newest commits if True, otherwise oldest commits
"""
def put(s):
f.write('%s\n' % s)
if is_newest:
def date_key(path):
return path_earliest_latest[path][0]
else:
def date_key(path):
return path_earliest_latest[path][1]
paths_by_date = sorted(path_earliest_latest.keys(), key=date_key)
if is_newest:
paths_by_date.reverse()
put('=' * 80)
put('%s: %d files' % (author, len(path_earliest_latest)))
for i, path in enumerate(paths_by_date[:n_files]):
earliest, latest = path_earliest_latest[path]
put('%3d: %s %s %s' % (i, date_str(earliest), date_str(latest), path))
|
19aa6a9f45d41f04d72b5699ac1599a2b8c2aa28
| 29,210 |
def pearson_correlation(trajectory_data):
""" Calculates the Pearson Correlation Matrix for node pairs
Usage: node_correlation, node_variance, node_average = pearson_correlation(trajectory_data)
Arguments:
trajectory_data: multidimensional numpy array; first index (rows) correspond to timestep, second index correspond to positions of each node;
Returns:
node_correlation: a nNodes x nNodes square matrix (numpy array) filled with the Pearson Correlation Coefficients for all node pairs
node_variance: one dimensional numpy array containing the variances of the data
node_average: one dimensional numpy array containing the averages of the data
"""
# ----------------------------------------
# CALCULATING THE AVERAGE OF TRAJECTORY DATA
# ----------------------------------------
nSteps = len(trajectory_data)
nSteps_range = range(nSteps)
nNodes = len(trajectory_data[0])
nNodes_range = range(nNodes)
for ts in nSteps_range:
# removing center of geometry translational motion
center_of_geometry = np.mean(trajectory_data[ts])
trajectory_data[ts] -= center_of_geometry
# no rotations to worry about...
node_average = np.sum(trajectory_data,axis=0)/nSteps
# ----------------------------------------
# PREPARE NUMPY ARRAYS
# ----------------------------------------
node_variance = np.zeros(nNodes,dtype=np.float64)
node_covariance = np.zeros((nNodes,nNodes),dtype=np.float64)
node_correlation = np.zeros((nNodes,nNodes),dtype=np.float64)
# ----------------------------------------
# CALCULATING PEARSON CORRELATION COEFFICIENT MATRIX
# ----------------------------------------
for ts in nSteps_range:
for i in nNodes_range:
node_variance[i] += trajectory_data[ts,i]**2
for j in nNodes_range[i:]:
node_covariance[i,j] += trajectory_data[ts,i]*trajectory_data[ts,j]
node_variance /= nSteps
node_variance -= node_average**2
node_covariance /= nSteps
for i in nNodes_range:
for j in nNodes_range[i:]:
node_covariance[i,j] -= node_average[i]*node_average[j]
node_correlation[i,j] = node_covariance[i,j]/np.sqrt(node_variance[i]*node_variance[j])
node_correlation[j,i] = node_correlation[i,j]
# ----------------------------------------
# OUTPUT OF AVERAGE, VARIANCE, COVARIANCE, AND CORRELATION MATRICES
# ----------------------------------------
np.savetxt('node_positional_average.dat',node_average)
np.savetxt('node_positional_variance.dat',node_variance)
np.savetxt('node_positional_covariance.dat',node_covariance)
np.savetxt('node_positional_correlation.dat',node_correlation)
# ----------------------------------------
# PLOTTING VARIANCE
# ----------------------------------------
plt.plot(nNodes_range,node_variance,'k')
plt.xlabel('Node Index',size=14)
    plt.ylabel(r'Node Variance ($\AA^{2}$)',size=14)
plt.tight_layout()
plt.savefig('node_positional_variance.png',dpi=600,transparent=True)
plt.close()
# ----------------------------------------
# PLOTTING COVARIANCE
# ----------------------------------------
fig, ax = plt.subplots()
temp = plt.pcolormesh(nNodes_range,nNodes_range,node_covariance,cmap='Blues')
cb1 = plt.colorbar()
    cb1.set_label(r'Node-Node Covariance ($\AA^{2}$)')
xlabels = [str(int(x)) for x in temp.axes.get_xticks()[:]]
ylabels = [str(int(y)) for y in temp.axes.get_yticks()[:]]
temp.axes.set_xticks(temp.axes.get_xticks(minor=True)[:]+0.5,minor=True)
temp.axes.set_xticks(temp.axes.get_xticks()[:]+0.5)
temp.axes.set_yticks(temp.axes.get_yticks(minor=True)[:]+0.5,minor=True)
temp.axes.set_yticks(temp.axes.get_yticks()[:]+0.5)
temp.axes.set_xticklabels(xlabels)
temp.axes.set_yticklabels(ylabels)
plt.xlim((-0.5,nNodes+0.5))
plt.ylim((-0.5,nNodes+0.5))
plt.xlabel('Node Index',size=14)
plt.ylabel('Node Index',size=14)
ax.set_aspect('equal')
plt.tight_layout()
plt.savefig('node_positional_covariance.png',dpi=600,transparent=True)
plt.close()
# ----------------------------------------
# PLOTTING CORRELATION
# ----------------------------------------
fig, ax = plt.subplots()
temp = plt.pcolormesh(nNodes_range,nNodes_range,node_correlation,cmap='bwr',vmin=-1.0,vmax=1.0)
cb1 = plt.colorbar()
cb1.set_label('Node-Node Correlation')
xlabels = [str(int(x)) for x in temp.axes.get_xticks()[:]]
ylabels = [str(int(y)) for y in temp.axes.get_yticks()[:]]
temp.axes.set_xticks(temp.axes.get_xticks(minor=True)[:]+0.5,minor=True)
temp.axes.set_xticks(temp.axes.get_xticks()[:]+0.5)
temp.axes.set_yticks(temp.axes.get_yticks(minor=True)[:]+0.5,minor=True)
temp.axes.set_yticks(temp.axes.get_yticks()[:]+0.5)
temp.axes.set_xticklabels(xlabels)
temp.axes.set_yticklabels(ylabels)
plt.xlim((-0.5,nNodes+0.5))
plt.ylim((-0.5,nNodes+0.5))
plt.xlabel('Node Index',size=14)
plt.ylabel('Node Index',size=14)
ax.set_aspect('equal')
plt.tight_layout()
plt.savefig('node_positional_correlation.png',dpi=600,transparent=True)
plt.close()
return node_correlation, node_variance, node_average
|
c12f4a4fd0959424e6ccfd66c55046a0bcc93cdb
| 29,211 |
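The nested loops above compute, node by node, the same correlation matrix that `np.corrcoef` gives column-wise once the per-frame center of geometry has been removed; a compact numpy check of that equivalence on hypothetical random data:

import numpy as np

rng = np.random.default_rng(1)
traj = rng.random((100, 5))                   # (nSteps, nNodes)
traj -= traj.mean(axis=1, keepdims=True)      # remove per-frame center of geometry

vectorized = np.corrcoef(traj, rowvar=False)  # nodes are columns
print(vectorized.shape)                       # (5, 5), matches node_correlation above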
from typing import Optional
def upper_band(df: pd.DataFrame, metric_col: str, rolling_window: Optional[int] = 20) -> pd.DataFrame:
    """Calculates the upper band of a stock's price movements.
Args:
df: the dataframe to append a column onto
metric_col: the column to calculate over (usually the 'Close' price)
rolling_window: the time window to calculate over
Returns:
        the original dataframe with the upper band appended
**Example**
.. code-block:: python
from wsbtrading import maths
df_mapped = maths.upper_band(df=df, metric_col='Close')
"""
rolling_window_string = str(rolling_window)
df = sma(df=df, metric_col=metric_col, rolling_window=rolling_window)
df = rolling_stddev(df=df, metric_col=metric_col, rolling_window=rolling_window)
df['upper_band'] = df[f'{rolling_window_string}sma'] + (2 * df[f'{rolling_window_string}stddev'])
return df
|
f831e95ca2027ccedf787178dc630e7e60403ce3
| 29,214 |
import torch
def reparametisation_trick(mu, log_var, device):
"""
:param mu: The mean of the latent variable to be formed (nbatch, n_z)
:param log_var: The log variance of the latent variable to be formed (nbatch, n_z)
:param device: CPU or GPU
:return: latent variable (nbatch, n_z)
"""
noise = torch.normal(mean=0, std=1.0, size=log_var.shape).to(torch.device(device))
z = mu + torch.mul(torch.exp(log_var / 2.0), noise)
return z
|
9cb646132f49fa79b6a8690d10fd188968931978
| 29,215 |
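A minimal CPU sketch showing the shapes involved; assumes PyTorch is installed and the function above is in scope.

import torch

mu = torch.zeros(8, 16)       # (nbatch, n_z)
log_var = torch.zeros(8, 16)  # log variance of 0 -> standard deviation of 1

z = reparametisation_trick(mu, log_var, device="cpu")
print(z.shape)  # torch.Size([8, 16])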
async def prefix_wrapper_async_callable(prefix_factory, re_flags, message):
"""
Function to execute asynchronous callable prefix.
This function is a coroutine.
Parameters
----------
prefix_factory : `async-callable`
Async callable returning the prefix.
re_flags : `int`
Regex matching flags.
message : ``Message``
The received message to parse the prefix from.
Returns
-------
prefix : `None`, `str`
        The prefix used by the user. Returned as `None` if parsing failed.
end : `int`
The start of the content after the prefix. Returned as `-1` if parsing failed.
"""
prefix = await prefix_factory(message)
if isinstance(prefix, str):
escaped_prefix = re_escape(prefix)
elif isinstance(prefix, tuple) and (len(prefix) > 0):
escaped_prefix = '|'.join(re_escape(prefix_part) for prefix_part in prefix)
else:
return None, -1
content = message.content
if content is None:
prefix = None
end = -1
else:
parsed = re_match(escaped_prefix, content, re_flags)
if parsed is None:
prefix = None
end = -1
else:
prefix = parsed.group(0)
end = parsed.end()
return prefix, end
|
d09499b4808a24bb643ae904a46049e89c77b7f3
| 29,217 |
def draw_mask(img0, img1, mask, size=14, downscale_ratio=1):
"""
Args:
        img0, img1: color images.
mask: 14x28 mask data.
size: mask size.
Returns:
display: image with mask.
"""
resize_imgs = []
resize_imgs.append(cv2.resize(
img0, (int(img0.shape[1] * downscale_ratio), int(img0.shape[0] * downscale_ratio))))
resize_imgs.append(cv2.resize(
img1, (int(img1.shape[1] * downscale_ratio), int(img1.shape[0] * downscale_ratio))))
masks = []
masks.append(ndimage.binary_fill_holes(np.reshape(mask[:size * size], (size, size))))
masks.append(ndimage.binary_fill_holes(np.reshape(mask[size * size:], (size, size))))
for idx, val in enumerate(masks):
h_interval = np.ceil(float(resize_imgs[idx].shape[0]) / val.shape[0])
w_interval = np.ceil(float(resize_imgs[idx].shape[1]) / val.shape[1])
for i in range(resize_imgs[idx].shape[0]):
for j in range(resize_imgs[idx].shape[1]):
p = int(np.floor(i / h_interval))
q = int(np.floor(j / w_interval))
if val[p, q]:
resize_imgs[idx][i, j, 0] = 255
display = np.concatenate(resize_imgs, axis=1)
return display
|
82149bd4fb9a313f76e029fb3234e6aff32cad2e
| 29,218 |
def sine_data_generation(no, seq_len, dim):
"""Sine data generation.
Args:
- no: the number of samples
- seq_len: sequence length of the time-series
- dim: feature dimensions
Returns:
- data: generated data
"""
# Initialize the output
data = list()
# Generate sine data
for i in range(no):
# Initialize each time-series
temp = list()
# For each feature
for k in range(dim):
# Randomly drawn frequency and phase
freq = np.random.uniform(0, 0.1)
phase = np.random.uniform(0, 0.1)
# Generate sine signal based on the drawn frequency and phase
temp_data = [np.sin(freq * j + phase) for j in range(seq_len)]
temp.append(temp_data)
# Align row/column
temp = np.transpose(np.asarray(temp))
# Normalize to [0,1]
temp = (temp + 1) * 0.5
# Stack the generated data
data.append(temp)
return data
|
1d363cce8788b62f84ab3fd05b11ff98cf5719a3
| 29,220 |
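A quick shape and range check for the generator above; assumes numpy is imported as np and the function above is in scope.

import numpy as np

data = sine_data_generation(no=4, seq_len=24, dim=5)
assert len(data) == 4
assert data[0].shape == (24, 5)
# Values are rescaled from [-1, 1] to [0, 1]
assert all((d >= 0).all() and (d <= 1).all() for d in data)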
def apply_wilcoxon_test(wide_optimal, dep_var, OVRS_NAMES, alpha):
"""Performs a Wilcoxon signed-rank test"""
pvalues = []
for ovr in OVRS_NAMES:
mask = np.repeat(True, len(wide_optimal))
pvalues.append(
wilcoxon(
wide_optimal.loc[mask, ovr], wide_optimal.loc[mask, dep_var]
).pvalue
)
wilcoxon_results = pd.DataFrame(
{
"Oversampler": OVRS_NAMES,
"p-value": pvalues,
"Significance": np.array(pvalues) < alpha,
}
)
return wilcoxon_results
|
a1a219c7b1bb6f917da11e5fe35c6992ccc60a8c
| 29,221 |
import torch
def get_accuracy(targets, outputs, k=1, ignore_index=None):
""" Get the accuracy top-k accuracy between two tensors.
Args:
        targets (1 - 2D :class:`torch.Tensor`): Target or true vector against which to measure accuracy
outputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector
ignore_index (int, optional): Specifies a target index that is ignored
Returns:
:class:`tuple` consisting of accuracy (:class:`float`), number correct (:class:`int`) and
total (:class:`int`)
Example:
>>> import torch
>>> from torchnlp.metrics import get_accuracy
>>> targets = torch.LongTensor([1, 2, 3, 4, 5])
>>> outputs = torch.LongTensor([1, 2, 2, 3, 5])
>>> accuracy, n_correct, n_total = get_accuracy(targets, outputs, ignore_index=3)
>>> accuracy
0.8
>>> n_correct
4
>>> n_total
5
"""
n_correct = 0.0
for target, output in zip(targets, outputs):
if not torch.is_tensor(target) or is_scalar(target):
target = torch.LongTensor([target])
if not torch.is_tensor(output) or is_scalar(output):
output = torch.LongTensor([[output]])
predictions = output.topk(k=min(k, len(output)), dim=0)[0]
for prediction in predictions:
if torch_equals_ignore_index(
target.squeeze(), prediction.squeeze(), ignore_index=ignore_index):
n_correct += 1
break
return n_correct / len(targets), int(n_correct), len(targets)
|
df7f60f37abd9e85b63ca616fb086b84a6ae17d9
| 29,222 |
def cameraPs2Ts(cameraPOs):
"""
convert multiple POs to Ts.
----------
input:
cameraPOs: list / numpy
output:
cameraTs: list / numpy
"""
if type(cameraPOs) is list:
N = len(cameraPOs)
else:
N = cameraPOs.shape[0]
cameraT_list = []
for _cameraPO in cameraPOs:
cameraT_list.append(__cameraP2T__(_cameraPO))
return cameraT_list if type(cameraPOs) is list else np.stack(cameraT_list)
|
10d6fb11a244eded26b4c9b989e88c131832357b
| 29,223 |
def dimerization_worker(primer_1, primer_2):
""" Returns the total number of complementary bases and the longest
run of complementary bases (weighted by HYBRID_SCORES), the median
length of all runs and the array of complementary bases.
"""
p1 = [set(AMB[i]) for i in primer_1]
p2 = [set(AMB[i]) for i in primer_2]
complementary = [
max(
[HYBRID_SCORES[base] for base in s1.intersection(s2)],
default=0) for s1, s2 in zip(p1, p2)]
total = sum(complementary)
complementary_runs = [list(comp) for run, comp in groupby(
complementary, key=lambda x: x > 0)]
max_run = sum(max(complementary_runs, key=sum))
run_lens = [sum(c) for c in complementary_runs if sum(c) > 0]
if run_lens:
run_lens.append(0)
median = np.median(run_lens) if run_lens else 0
return (total, max_run, median, complementary)
|
6bd1fd8a990c35f8cd0cde134714c9d4a19cfdb1
| 29,224 |
from skimage import img_as_ubyte
def load_dataset(ds, elsize=[], axlab='',
outlayout='', dtype='',
dataslices=None, uint8conv=False):
"""Load data from a proxy and select/transpose/convert/...."""
slices = get_slice_objects(dataslices, ds.shape)
data = slice_dataset(ds, slices)
if list(axlab) != list(outlayout):
in2out = [axlab.index(l) for l in outlayout]
data = np.transpose(data, in2out)
elsize = np.array(elsize)[in2out]
axlab = outlayout
slices = [slices[i] for i in in2out]
if dtype:
data = data.astype(dtype, copy=False)
if uint8conv:
data = normalize_data(data)[0]
data = img_as_ubyte(data)
return data, elsize, axlab, slices
|
8ccbc4d3c42bcf0861daec23b7b2410b91b89c5c
| 29,225 |
import copy
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
|
a3ba0960a3711b30c46e8fc75237786d5297f5eb
| 29,226 |
def mro_hasattr(cls: type, attr: str) -> bool:
"""Check if an attribute exists in a type's class hierarchy
Args:
cls (type): The type
attr (str): The attribute
Returns:
bool: True if has the attribute.
Raises:
TypeError: Not called on a type
"""
if not isinstance(cls, type):
raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
for klass in cls.mro()[1:]:
if hasattr(klass, attr):
return True
return False
|
cfc41693e3d3321bcb63dae079abf2e768f97905
| 29,227 |
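A usage sketch for `mro_hasattr` above; note that the lookup starts at `mro()[1:]`, so attributes defined only on the class itself are not reported.

class Base:
    greeting = "hello"

class Child(Base):
    only_here = True

print(mro_hasattr(Child, "greeting"))   # True:  inherited from Base
print(mro_hasattr(Child, "only_here"))  # False: defined on Child itself, not on an ancestor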
def get_channel_number_from_frequency(frequency):
"""gets the 802.11 channel for a corresponding frequency
in units of kilohertz (kHz). does not support FHSS."""
try:
return _20MHZ_CHANNEL_LIST.get(frequency, "Unknown")
except KeyError:
return "Unknown"
|
0867a458e98a5a97b3d8925aeeee14f6f5af58a5
| 29,228 |
def fetch_single_minutely_equity(code, start, end):
"""
    Read minute-level trade detail data for a single stock over a period from the local database.
    **Note**
    Trading-calendar minutes run from 9:31-11:30 and 13:01-15:00.
    In the database, minute-level trade data is stored by date.
    Parameters
    ----------
    code : str
        Stock code to fetch data for
    start : datetime-like
        Start date (inclusive)
    end : datetime-like
        End date
    Returns
    -------
    DataFrame: DataFrame object with OHLCV columns.
Examples
--------
>>> stock_code = '000333'
>>> start = '2020-06-29'
>>> end = pd.Timestamp('2020-06-30')
>>> df = fetch_single_minutely_equity(stock_code, start, end)
>>> df.tail()
close high low open volume
2018-04-19 14:56:00 51.55 51.56 51.50 51.55 376400
2018-04-19 14:57:00 51.55 51.55 51.55 51.55 20000
2018-04-19 14:58:00 51.55 51.55 51.55 51.55 0
2018-04-19 14:59:00 51.55 51.55 51.55 51.55 0
2018-04-19 15:00:00 51.57 51.57 51.57 51.57 353900
"""
calendar = get_calendar('XSHG')
fmt = r"%Y-%m-%d"
dates = calendar.sessions_in_range(
start.strftime(fmt), end.strftime(fmt)).tz_localize(None)
cols = ['open', 'high', 'low', 'close', 'volume']
    # Minute-level data for an index
if len(code) == 7:
return _index_minute_data(code, dates)
db = get_db('wy_quotes')
func = partial(_fetch_single_minutely_equity,
stock_code=code, db=db, is_index=False)
with ThreadPoolExecutor(MAX_WORKER) as executor:
dfs = executor.map(func, dates)
return pd.concat(dfs).sort_index()
|
cc125997fe2f0313295732235b1df2e886d3fcad
| 29,230 |
def method2():
    Provide an example of a doc string that is too long.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
""" # noqa W505: doc line too long (127 > 100 characters) (auto-generated noqa)
return 7
|
689c50c2cfb62d39cd35eec125813830c6068fdb
| 29,232 |
def get_parent_compartment_ocid(teamname):
"""
    Retrieves the OCID for the compartment based on the team name (assuming the
    root -- team -- individual model of compartment structure).
Args:
teamname (str): name of the team level compartment
Returns:
        str: The OCID or None - None is only returned in the event of an internal error
"""
global logger
parent_compartment_ocid = None
try:
        if teamname is not None:
parent_compartment_ocid = find (teamname, QRY_CONST.COMPARTMENT)
if (parent_compartment_ocid == None):
raise LookupError ("No compartment found")
else:
parent_compartment_ocid = config_props[CONFIG_CONST.TENANCY]
except LookupError as le:
logger.error ("Compartment lookup failed", le)
return parent_compartment_ocid
|
bec0bb1d98bb8da0c4e670efebcaa6adcfb8d494
| 29,233 |
def set_recommended_watch_points(session_id):
"""Set recommended watch points."""
body = _read_post_request(request)
request_body = body.get('requestBody')
if request_body is None:
raise ParamMissError('requestBody')
set_recommended = request_body.get('set_recommended')
reply = _wrap_reply(_session_manager.get_session(session_id).set_recommended_watch_points, set_recommended)
return reply
|
81f37e60ba108c2b59c79271342153ea8879697a
| 29,235 |
def epochJulian2JD(Jepoch):
"""
----------------------------------------------------------------------
Purpose: Convert a Julian epoch to a Julian date
Input: Julian epoch (nnnn.nn)
Returns: Julian date
Reference: See JD2epochJulian
Notes: e.g. 1983.99863107 converts into 2445700.5
Inverse of function JD2epochJulian
----------------------------------------------------------------------
"""
return (Jepoch-2000.0)*365.25 + 2451545.0
|
2738940ad390f979317177984c9120b34fa7d2af
| 29,236 |
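The docstring example above, checked numerically; the inverse shown in the comment is an assumption based on the stated relationship, not the actual JD2epochJulian source.

jd = epochJulian2JD(1983.99863107)
print(jd)  # ~2445700.5

# Presumed inverse (JD2epochJulian): epoch = 2000.0 + (jd - 2451545.0) / 365.25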
def GetHighlightColour():
"""
Gets the default highlight color.
:rtype: :class:`wx.Colour`
"""
if wx.Platform == '__WXMAC__':
if CARBON:
if wx.VERSION < (2, 9, 0, 0, ''):
# kThemeBrushButtonPressedLightHighlight
brush = wx.Brush(wx.BLACK)
brush.MacSetTheme(Carbon.Appearance.kThemeBrushFocusHighlight)
return brush.GetColour()
else:
color = wx.MacThemeColour(Carbon.Appearance.kThemeBrushFocusHighlight)
return color
# Fallback to text highlight color
return wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
|
1654393d9b5f3d5ea610c9dae9706d04d7f37d54
| 29,238 |
import inspect
def get_classes(mod):
"""Return a list of all classes in module 'mod'"""
return [
key
for key, _ in inspect.getmembers(mod, inspect.isclass)
if key[0].isupper()
]
|
be04546650a6243a3abfe4053a4dcaa9d71f85d7
| 29,241 |
import struct
def ustring_to_string(ptr, length=None):
"""Convert a pointer to UTF-16 data into a Python string encoded with utf-8.
ptr and length are both gdb.Value objects.
If length is unspecified, will guess at the length."""
error_message = ''
if length is None:
length, error_message = guess_string_length(ptr)
else:
length = int(length)
char_vals = [int((ptr + i).dereference()) for i in range(length)]
string = struct.pack('H' * length, *char_vals).decode('utf-16', 'replace').encode('utf-8')
return string + error_message
|
9981d15eb26816fbc7f2cb0e3cac99b4d738c25a
| 29,242 |
import logging
def handle_outgoing(msg):
"""
Should return a requeue flag, so if it returns True, the message will be
requeued and processed again immediately, and if it returns False, it will
not be queued again.
"""
def onerror():
logging.exception("Exception while processing SMS %s" % msg._id)
backend = msg.outbound_backend
sms_interval = backend.get_sms_interval()
use_rate_limit = sms_interval is not None
use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
len(backend.phone_numbers) > 1)
if use_rate_limit or use_load_balancing:
client = cache_core.get_redis_client()
lbi = None
orig_phone_number = None
if use_load_balancing:
lbi = backend.get_next_phone_number(client)
orig_phone_number = lbi.phone_number
elif (isinstance(backend, SMSLoadBalancingMixin) and
len(backend.phone_numbers) == 1):
# If there's only one phone number, we don't need to go through the
# load balancing algorithm. But we should always pass an
# orig_phone_number if it's an instance of SMSLoadBalancingMixin.
orig_phone_number = backend.phone_numbers[0]
if use_rate_limit:
if use_load_balancing:
lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id,
lbi.phone_number)
else:
lock_key = "sms-backend-%s-rate-limit" % backend._id
lock = client.lock(lock_key, timeout=30)
if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
if use_load_balancing:
lbi.finish(save_stats=True)
result = send_message_via_backend(msg, backend=backend,
orig_phone_number=orig_phone_number, onerror=onerror)
if use_rate_limit:
wait_and_release_lock(lock, sms_interval)
if result:
handle_successful_processing_attempt(msg)
else:
handle_unsuccessful_processing_attempt(msg)
return False
else:
# We're using rate limiting, but couldn't acquire the lock, so
# another thread is sending sms with this backend. Rather than wait,
# we'll just put this message at the back of the queue.
if use_load_balancing:
lbi.finish(save_stats=False)
return True
|
4b189ef37965ff5725a77af615b52bc019df5910
| 29,243 |
def add_entry(ynew: float, s: float, s2: float, n: int, calc_var: bool):
"""Adds an entry to the metrics, s, s2, and n.
s: previous value of sum of y[]
s2: previous value of sum of y[]*y[]
n: previous number of entries in the metric
"""
n = n + 1
s = s + ynew
s2 = s2 + ynew * ynew
if calc_var:
var = (s2 - s * s / n) / n # This is sigma**2
else:
var = None
return s, s2, n, var
|
8aed2d9f5acb85273b1a152b0747156e49f1ebdc
| 29,244 |
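A short sketch of streaming a few samples through `add_entry` above and recovering the mean and (population) variance at the end.

samples = [2.0, 4.0, 6.0]
s = s2 = 0.0
n = 0
for y in samples:
    s, s2, n, var = add_entry(y, s, s2, n, calc_var=True)

print(s / n, var)  # 4.0 and 8/3 ~ 2.667 (population variance)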
def get_conversion_dict(conversion_name):
"""Retrieves a hard-coded label conversion dictionary.
When coarsening the label set of a task based on a predefined
conversion scheme like Penn Treebank tags to Universal PoS tags,
this function provides the map, out of a fixed list of known
maps addressed by a keyword string.
"""
if conversion_name == PTB_UNIVERSAL_CONVERSION_STRING:
return ptb_to_univ_map
elif conversion_name == WSD_COARSENING_CONVERSION_STRING:
return coarse_wsd_map
else:
raise ValueError("Unknown conversion name: {}".format(conversion_name))
|
9137a50e2f9900abf6d60b51c8bc44e67f13df86
| 29,245 |
import scipy.signal
def discount_cumsum(x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
|
82bcb686840191b7cef650b30e14308393331fa2
| 29,246 |
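A minimal check of the filter-based implementation above against a naive loop; assumes numpy and scipy are installed and the function above is in scope.

import numpy as np

x = np.array([1.0, 2.0, 3.0])
gamma = 0.9

# Naive reference: out[t] = sum_{k >= t} gamma**(k - t) * x[k]
expected = np.array([sum(gamma ** (k - t) * x[k] for k in range(t, len(x)))
                     for t in range(len(x))])

assert np.allclose(discount_cumsum(x, gamma), expected)  # [5.23, 4.7, 3.0]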
def compute_rgb_scales(alpha_thres=0.9):
"""Computes RGB scales that match predicted albedo to ground truth,
using just the first validation view.
"""
config_ini = configutil.get_config_ini(FLAGS.ckpt)
config = ioutil.read_config(config_ini)
# First validation view
vali_dir = join(config_ini[:-4], 'vis_vali')
data_root = config.get('DEFAULT', 'data_root')
epoch_dirs = xm.os.sortglob(vali_dir, 'epoch?????????')
epoch_dir = epoch_dirs[-1]
batch_dirs = xm.os.sortglob(epoch_dir, 'batch?????????')
batch_dir = batch_dirs[0]
# Find GT path
metadata_path = join(batch_dir, 'metadata.json')
metadata = xm.io.json.load(metadata_path)
view = metadata['id']
pred_path = join(batch_dir, 'pred_albedo.png')
gt_path = join(data_root, view, 'albedo.png')
# Load prediction and GT
pred = xm.io.img.read(pred_path) # gamma corrected
gt = xm.io.img.read(gt_path) # linear
pred = xm.img.normalize_uint(pred)
gt = xm.img.normalize_uint(gt)
pred = pred ** 2.2 # undo gamma
gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
alpha = gt[:, :, 3]
gt = gt[:, :, :3]
# Compute color correction scales, in the linear space
is_fg = alpha > alpha_thres
opt_scale = []
for i in range(3):
x_hat = pred[:, :, i][is_fg]
x = gt[:, :, i][is_fg]
scale = x_hat.dot(x) / x_hat.dot(x_hat)
opt_scale.append(scale)
opt_scale = tf.convert_to_tensor(opt_scale, dtype=tf.float32)
return opt_scale
|
44341b8c878ca02179ded0a3cc1c173f1eaea009
| 29,247 |
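The per-channel scale above is the closed-form least-squares solution of min_s ||s * x_hat - x||^2; a standalone numpy sketch of just that step, on hypothetical data:

import numpy as np

rng = np.random.default_rng(0)
x_hat = rng.random(1000)  # predicted albedo values inside the foreground mask
x = 0.7 * x_hat           # "ground truth" that is an exactly scaled copy

scale = x_hat.dot(x) / x_hat.dot(x_hat)
print(scale)  # ~0.7, recovering the per-channel color-correction scale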