content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
from scipy.stats import zscore
def collaspe_fclusters(data=None, t=None, row_labels=None, col_labels=None,
                       linkage='average', pdist='euclidean', standardize=3, log=False):
    """a function to collapse flat clusters by averaging the vectors within
    each flat cluster obtained from hierarchical clustering"""
## preprocess data
if log:
data = np.log2(data + 1.0)
if standardize == 1: # Standardize along the columns of data
data = zscore(data, axis=0)
elif standardize == 2: # Standardize along the rows of data
data = zscore(data, axis=1)
if row_labels is not None and col_labels is None: ## only get fclusters for rows
d = dist.pdist(data, metric=pdist)
axis = 1 ##!!! haven't checked whether this is correct yet
elif row_labels is None and col_labels is not None: ## only get fclusters for cols
d = dist.pdist(data.T, metric=pdist)
axis = 0
D = dist.squareform(d)
Y = sch.linkage(D, method=linkage, metric=pdist)
fclusters = sch.fcluster(Y, t, 'distance')
fcluster_set = set(fclusters)
data_cf = []
for fc in fcluster_set:
mask = np.where(fclusters==fc)
data_t = data.T
vector_avg = np.average(data_t[mask],axis=axis)
data_cf.append(vector_avg)
data_cf = np.array(data_cf).T
return data_cf | 879ba2e9469831b096f716dbbb38047580d76844 | 19,968 |
from typing import Union
def iapproximate_add_fourier_state(self,
lhs: Union[int, QuantumRegister],
rhs: QRegisterPhaseLE,
qcirc: QuantumCircuit,
approximation: int = None) -> ApproximateAddFourierStateGate:
"""Substract two registers with rhs in quantum fourier state."""
if isinstance(lhs, QuantumRegister):
self._check_qreg(lhs)
self._check_dups([lhs, rhs])
self._check_qreg(rhs)
return self._attach(ApproximateAddFourierStateGate(lhs, rhs, qcirc, approximation).inverse()) | 3e1ccb2576e8babdb589c60aec51f585001bdd9a | 19,969 |
import itertools
def _get_indices(A):
"""Gets the index for each element in the array."""
dim_ranges = [range(size) for size in A.shape]
if len(dim_ranges) == 1:
return dim_ranges[0]
return itertools.product(*dim_ranges) | dc2e77c010a6cfd7dbc7b7169f4bd0d8da62b891 | 19,970 |
def calculateOriginalVega(f, k, r, t, v, cp):
"""计算原始vega值"""
price1 = calculatePrice(f, k, r, t, v*STEP_UP, cp)
price2 = calculatePrice(f, k, r, t, v*STEP_DOWN, cp)
vega = (price1 - price2) / (v * STEP_DIFF)
return vega | 7b90662003231b50c4d758c3a7beb122b90c05e7 | 19,971 |
import numpy as np
def to_numpy(tensor):
    """Converts a PyTorch Tensor to a NumPy array."""
if isinstance(tensor, np.ndarray):
return tensor
if hasattr(tensor, 'is_cuda'):
if tensor.is_cuda:
return tensor.cpu().detach().numpy()
if hasattr(tensor, 'detach'):
return tensor.detach().numpy()
if hasattr(tensor, 'numpy'):
return tensor.numpy()
return np.array(tensor) | c5186918fe7a07054607df500d61b32ae1b0037f | 19,973 |
def _maybe_to_dense(obj):
"""
try to convert to dense
"""
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj | a2f18aec19bd0bad58a35e772180b94d649262e1 | 19,976 |
def update_visit_counter(visit_counter_matrix, observation, action):
"""Update the visit counter
Counting how many times a state-action pair has been
visited. This information can be used during the update.
@param visit_counter_matrix a matrix initialised with zeros
@param observation the state observed
@param action the action taken
"""
x = observation[0]
y = observation[1]
z = observation[2]
visit_counter_matrix[x,y,z,action] += 1.0
return visit_counter_matrix | 418097d34f194c81e38e3d6b122ae743c7b73452 | 19,977 |
import datetime
def pandas_time_safe(series):
"""Pandas check time safe"""
return (series.map(dt_seconds)
if isinstance(series.iloc[0], datetime.time)
else series) | f802d7ad4cd9c9dbf426b2c1436c41402b24da0b | 19,978 |
def binary_cross_entropy_loss(predicted_y, true_y):
"""Compute the binary cross entropy loss between a vector of labels of size N and a vector of probabilities of same
size
Parameters
----------
predicted_y : numpy array of shape (N, 1)
The predicted probabilities
true_y : numpy array of shape (N, )
The true labels
Returns
-------
binary_cross_entropy_loss
a numpy array of shape (N, )
"""
return -np.log(np.squeeze(predicted_y))*true_y - np.log(1 - np.squeeze(predicted_y))*(1 - true_y) | ba72db9051976a9d07355a1b246a22faea43b2b1 | 19,979 |
def money_flow_index(high, low, close, volume, n=14, fillna=False):
"""Money Flow Index (MFI)
Uses both price and volume to measure buying and selling pressure. It is
positive when the typical price rises (buying pressure) and negative when
the typical price declines (selling pressure). A ratio of positive and
negative money flow is then plugged into an RSI formula to create an
oscillator that moves between zero and one hundred.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:money_flow_index_mfi
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
# 0 Prepare dataframe to work
df = pd.DataFrame([high, low, close, volume]).T
df.columns = ['High', 'Low', 'Close', 'Volume']
df['Up_or_Down'] = 0
df.loc[(df['Close'] > df['Close'].shift(1)), 'Up_or_Down'] = 1
df.loc[(df['Close'] < df['Close'].shift(1)), 'Up_or_Down'] = 2
# 1 typical price
tp = (df['High'] + df['Low'] + df['Close']) / 3.0
# 2 money flow
mf = tp * df['Volume']
# 3 positive and negative money flow with n periods
df['1p_Positive_Money_Flow'] = 0.0
df.loc[df['Up_or_Down'] == 1, '1p_Positive_Money_Flow'] = mf
n_positive_mf = df['1p_Positive_Money_Flow'].rolling(n).sum()
df['1p_Negative_Money_Flow'] = 0.0
df.loc[df['Up_or_Down'] == 2, '1p_Negative_Money_Flow'] = mf
n_negative_mf = df['1p_Negative_Money_Flow'].rolling(n).sum()
# 4 money flow index
mr = n_positive_mf / n_negative_mf
mr = (100 - (100 / (1 + mr)))
if fillna:
mr = mr.fillna(50)
return pd.Series(mr, name='mfi_'+str(n)) | bd2cbb7b18c7be8d5c0ec0a984c4f3cadf295eec | 19,980 |
def parse_args(args=[], doc=False):
"""
Handle parsing of arguments and flags. Generates docs using help from `ArgParser`
Args:
args (list): argv passed to the binary
doc (bool): If the function should generate and return manpage
Returns:
Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage
"""
parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
parser.add_argument("file")
parser.add_argument("--version", action="store_true", help=f"print program version")
args = parser.parse_args(args)
arg_helps_with_dups = parser._actions
arg_helps = []
[arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps]
NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"
for item in arg_helps:
        # It's a positional argument
if len(item.option_strings) == 0:
# If the argument is optional:
if item.nargs == "?":
SYNOPSIS += f"[{item.dest.upper()}] "
elif item.nargs == "+":
SYNOPSIS += f"[{item.dest.upper()}]... "
else:
SYNOPSIS += f"{item.dest.upper()} "
else:
# Boolean flag
if item.nargs == 0:
if len(item.option_strings) == 1:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
elif item.nargs == "+":
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"
if doc:
return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
else:
return args, parser | 355d8e8722171ab64ebdc02d1fe39db7521a497b | 19,981 |
def gaussian_kernel_dx_i_dx_j(x, y, sigma=1.):
""" Matrix of \frac{\partial k}{\partial x_i \partial x_j}"""
assert(len(x.shape) == 1)
assert(len(y.shape) == 1)
d = x.size
pairwise_dist = np.outer(y-x, y-x)
x_2d = x[np.newaxis,:]
y_2d = y[np.newaxis,:]
k = gaussian_kernel(x_2d, y_2d, sigma)
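    # term1: outer-product (y - x)(y - x)^T contribution; term2: diagonal (identity) correction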
term1 = k*pairwise_dist * (2.0/sigma)**2
term2 = k*np.eye(d) * (2.0/sigma)
return term1 - term2 | 626f38a5a5e1e7c7dd98c92636a424c74fc7146b | 19,982 |
from pathlib import Path
import tarfile
from git import Repo
def clone_compressed_repository(base_path, name):
"""Decompress and clone a repository."""
compressed_repo_path = Path(__file__).parent / "tests" / "fixtures" / f"{name}.tar.gz"
working_dir = base_path / name
bare_base_path = working_dir / "bare"
with tarfile.open(compressed_repo_path, "r") as fixture:
fixture.extractall(str(bare_base_path))
bare_path = bare_base_path / name
repository_path = working_dir / "repository"
repository = Repo(bare_path, search_parent_directories=True).clone(repository_path)
return repository | bbd733b079ebedb91687597180b0f98825f6ed6c | 19,983 |
def slices(series, length):
"""
Given a string of digits, output all the contiguous substrings
of length n in that string in the order that they appear.
:param series string - string of digits.
:param length int - the length of the series to find.
:return list - List of substrings of specified length from series.
"""
if len(series) < length:
raise ValueError("Length requested is shorter than series.")
if length < 1:
raise ValueError("Length requested is less than 1.")
substrings = []
for index, number in enumerate(series):
sub = series[index:index + length]
if len(sub) == length:
substrings.append(sub)
return substrings | ea2d1caf26a3fc2e2a57858a7364b4ebe67297d6 | 19,984 |
from where.models.delay import gnss_range # Local import to avoid cyclical import
def get_flight_time(dset):
"""Get flight time of GNSS signal between satellite and receiver
Args:
dset(Dataset): Model data
Return:
numpy.ndarray: Flight time of GNSS signal between satellite and receiver in [s]
"""
# Get geometric range between satellite and receiver position
geometric_range = gnss_range.gnss_range(dset)
return geometric_range / constant.c | 503bfb55fc10bef9f610291aa0f35e0530c8b0f2 | 19,985 |
def textToTuple(text, defaultTuple):
"""This will convert the text representation of a tuple into a real
tuple. No checking for type or number of elements is done. See
textToTypeTuple for that.
"""
# first make sure that the text starts and ends with brackets
text = text.strip()
if text[0] != '(':
text = '(%s' % (text,)
if text[-1] != ')':
text = '%s)' % (text,)
try:
returnTuple = eval('tuple(%s)' % (text,))
except Exception:
returnTuple = defaultTuple
return returnTuple | 89fed32bff39ad9e69513d7e743eb05a3bf7141a | 19,986 |
def m2m_bi2uni(m2m_list):
""" Splits a bigram word model into a unique unigram word model
i=11, j=3 i=10, j=3 i=9,10,11,12, j=3,4,5,6
###leilatem### ###leilatem### ###leilatem###
###temum### ###temum### ###temum###
^ ^ ^^^^ m: mismatch
m m MMMm M: match
"""
q = Queue(maxsize=2)
phonemes_list = []
    while len(m2m_list):  # NOTE: can be optimised by removing this while
while not q.full():
bigram = m2m_list.pop(0)
q.put(PADD + bigram + PADD)
curr_word = q.get()
next_word = q.get()
i = len(curr_word) - 1 - len(PADD) # to decrease backwards
j = len(PADD) # to increase forward
unmatch_count = 0
match = False
#print(curr_word, '***********************************')
#print(next_word, '***********************************')
while not match:
# scan the first section: mismatch (m)
while curr_word[i] != next_word[j]:
#print('%-6s %-6s %02d %02d <- bi2uni' % (curr_word[i],
# next_word[j], i, j))
i -= 1
unmatch_count += 1
#print('%-6s %-6s' % (curr_word[i], next_word[j]))
            # kludge ("gambiarra") to avoid mismatches like in 's e j s'
if unmatch_count == 0 and not is_vowel(curr_word[i][0]):
i -= 1
unmatch_count += 1
continue
#print('possible match')
for k in range(unmatch_count + len(PADD)):
# scan the second section: a match (M)
if curr_word[i + k] == next_word[j + k]:
continue
else:
# found third section: right mismatch with PADD (m)
if curr_word[i + k] == '#': # check immediate mismatch
match = True
#print('match! ->', end=' ')
#print(curr_word[len(PADD):i])
else:
#print('houston we have a problem: (%s, %s)' %
# (curr_word[i + k], next_word[j + k]))
i -= 1
unmatch_count += 1
break
phonemes_list.append(curr_word[len(PADD):i])
q.put(next_word)
phonemes_list.append(next_word[len(PADD):j + k])
phonemes_list.append(next_word[j + k:-len(PADD)])
return phonemes_list | 37f1644dc16bc0e4dd47acd7a69f1c2d6fbfc6d5 | 19,987 |
import time
def time_func(func):
"""Times how long a function takes to run.
It doesn't do anything clever to avoid the various pitfalls of timing a function's runtime.
(Interestingly, the timeit module doesn't supply a straightforward interface to run a particular
function.)
"""
def timed(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
return end - start
return timed | 3506ad28c424434402f3223a43daff4eb51b7763 | 19,988 |
def GetPhiPsiChainsAndResiduesInfo(MoleculeName, Categorize = True):
"""Get phi and psi torsion angle information for residues across chains in
a molecule containing amino acids.
The phi and psi angles are optionally categorized into the following groups
corresponding to four types of Ramachandran plots:
General: All residues except glycine, proline, or pre-proline
Glycine: Only glycine residues
Proline: Only proline residues
Pre-Proline: Only residues before proline not including glycine or proline
Arguments:
MoleculeName (str): Name of a PyMOL molecule object.
Returns:
dict: A dictionary containing sorted list of residue numbers for each
chain and dictionaries of residue names, phi and psi angles for each
residue number.
Examples:
PhiPsiInfoMap = GetPhiPsiChainsAndResiduesInfo(MolName)
for ChainID in PhiPsiInfoMap["ChainIDs"]:
for ResNum in PhiPsiInfoMap["ResNums"][ChainID]:
ResName = PhiPsiInfoMap["ResName"][ChainID][ResNum]
Phi = PhiPsiInfoMap["Phi"][ChainID][ResNum]
Psi = PhiPsiInfoMap["Psi"][ChainID][ResNum]
Category = PhiPsiInfoMap["Category"][ChainID][ResNum]
MiscUtil.PrintInfo("ChainID: %s; ResNum: %s; ResName: %s; Phi: %8.2f;
Psi: %8.2f; Category: %s" % (ChainID, ResNum, ResName, Phi,
Psi, Category))
"""
if not len(MoleculeName):
return None
SelectionCmd = "%s" % (MoleculeName)
PhiPsiResiduesInfoMap = _GetSelectionPhiPsiChainsAndResiduesInfo(SelectionCmd, Categorize)
return PhiPsiResiduesInfoMap | 07295d99f3f2150e4a9e0782bf376ac1aa22a499 | 19,989 |
def generate_data(n_samples=30):
"""Generate synthetic dataset. Returns `data_train`, `data_test`,
`target_train`."""
x_min, x_max = -3, 3
x = rng.uniform(x_min, x_max, size=n_samples)
noise = 4.0 * rng.randn(n_samples)
y = x ** 3 - 0.5 * (x + 1) ** 2 + noise
y /= y.std()
data_train = pd.DataFrame(x, columns=["Feature"])
data_test = pd.DataFrame(
np.linspace(x_max, x_min, num=300), columns=["Feature"])
target_train = pd.Series(y, name="Target")
return data_train, data_test, target_train | f7d2f5637327119d5f08fe2ccbfe2d4f41a34c5c | 19,990 |
def get_element_event(element_key):
"""
Get object's event.
"""
model = apps.get_model(settings.WORLD_DATA_APP, "event_data")
return model.objects.filter(trigger_obj=element_key) | bd177573035209e97110a2213cbe98b3b2eadafb | 19,991 |
import operator
def get_seller_price(sellers,seller_id,core_request):
"""
    sellers is a list of lists where each list contains the following items in order
1. Seller Name
2. Number of available cores
3. Price of each core
4. List of lists where length of main list is equal to number of cores. Length of minor list will be zero.
seller_id is the seller index whose price to be determined.
You can access this seller by seller[seller_id]
core_request is the number of core requested
return the total price of this deal using second price auction
    if seller_id has the largest ask, then return its own price
"""
new_list = list(sellers)
new_list.sort(key=operator.itemgetter(2))
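    # Sellers are now sorted by ascending core price; locate seller_id's position in this ordering.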
i=0;
for x in new_list:
if x==sellers[seller_id]:
break
i+=1
#print i
if i==len(sellers)-1:
return new_list[i][2]*core_request
else :
price=0
core=core_request
price=0
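        # Walk up the ask ladder, filling the request with cores priced at each higher seller's ask.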
while core>0:
i+=1
if i==len(sellers)-1:
price+=core*new_list[i][2]
core=0
else:
if core>new_list[i][1]:
core=core-new_list[i][1]
price+=new_list[i][1]*new_list[i][2]
else:
price+=core*new_list[i][2]
core=0
return price | a1103b05409cdab20dd1982f5839a712939c3c3f | 19,992 |
def create_affiliation_ttl(noun_uri: str, noun_text: str, affiliated_text: str, affiliated_type: str) -> list:
"""
Creates the Turtle for an Affiliation.
@param noun_uri: String holding the entity/URI to be affiliated
@param noun_text: String holding the sentence text for the entity
@param affiliated_text: String specifying the entity (organization, group, etc.) to which the
noun is affiliated
@param affiliated_type: String specifying the class type of the entity
@return: An array of strings holding the Turtle representation of the Affiliation
"""
affiliated_uri = f':{affiliated_text.replace(" ", "_")}'
affiliation_uri = f'{noun_uri}{affiliated_text.replace(" ", "_")}Affiliation'
noun_str = f"'{noun_text}'"
ttl = [f'{affiliation_uri} a :Affiliation ; :affiliated_with {affiliated_uri} ; :affiliated_agent {noun_uri} .',
f'{affiliation_uri} rdfs:label "Relationship based on the text, {noun_str}" .',
f'{affiliated_uri} a {affiliated_type} ; rdfs:label "{affiliated_text}" .']
wikidata_desc = get_wikipedia_description(affiliated_text)
if wikidata_desc:
ttl.append(f'{affiliated_uri} :definition "{wikidata_desc}" .')
return ttl | d641a5aa77860dad48c605b3486bc83c0250d551 | 19,993 |
from typing import Tuple
def get_subpixel_indices(col_num: int) -> Tuple[int, int, int]:
"""Return a 3-tuple of 1-indexed column indices representing subpixels of a single pixel."""
offset = (col_num - 1) * 2
red_index = col_num + offset
green_index = col_num + offset + 1
blue_index = col_num + offset + 2
return red_index, blue_index, green_index | cb4a1b9a4d27c3a1dad0760267e6732fe2d0a0da | 19,994 |
def sigmoid(x):
""" This function computes the sigmoid of x for NeuralNetwork"""
return NN.sigmoid(x) | 534391dc7b39aede21e6a66692bc1ca2ea1ce8b6 | 19,995 |
def extractTranslatingSloth(item):
"""
'Translating Sloth'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('娘子我才是娃的爹', 'Wife, I Am the Baby\'s Father', 'translated'),
('Wife, I Am the Baby\'s Father', 'Wife, I Am the Baby\'s Father', 'translated'),
('I want to eat meat Wife', 'I want to eat meat Wife', 'translated'),
('My Lord is a Stone', 'My Lord is a Stone', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 0ed9f5d4ae4c69fae2dc46e0260e29d1c97225af | 19,996 |
def human(number: int, suffix='B') -> str:
"""Return a human readable memory size in a string.
Initially written by Fred Cirera, modified and shared by Sridhar Ratnakumar
(https://stackoverflow.com/a/1094933/6167478), edited by Victor Domingos.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(number) < 1024.0:
return f"{number:3.1f} {unit}{suffix}"
number = number / 1024.0
return f"{number:.1f}{'Yi'}{suffix}" | b41e9014ee7afbacb40115f85223ae89b08094a8 | 19,997 |
def _get_field_names(field: str, aliases: dict):
"""
    Override this method to customize how field names are resolved through the alias map.
    :param field: field name, optionally prefixed with "-" (e.g. for descending sort order)
    :param aliases: mapping from field name to a comma-separated string of aliases
    :return: list of resolved field names
"""
trimmed = field.lstrip("-")
alias = aliases.get(trimmed, trimmed)
return alias.split(",") | cb732c07018c33a546bf42ab1bf3516d2bd6c824 | 19,998 |
def get_answer(question_with_context):
"""
Get answer for question and context.
"""
# Create pipeline
question_answering_pipeline = pipeline('question-answering')
# Get answer
answer = question_answering_pipeline(question_with_context)
# Return answer
return answer | ba560ecf5aa07a59b697465e0c34c8b32ddf64e6 | 19,999 |
import numpy
def undiskify(z):
"""Maps SL(2)/U(1) poincare disk coord to Lie algebra generator-factor."""
# Conventions match (2.13) in https://arxiv.org/abs/1909.10969
return 2* numpy.arctanh(abs(z)) * numpy.exp(1j * numpy.angle(z)) | 9ac4cd521ca64decd082a34e35e0d080d3190e13 | 20,000 |
def mean_vertex_normals(vertex_count,
faces,
face_normals,
**kwargs):
"""
Find vertex normals from the mean of the faces that contain
that vertex.
Parameters
-----------
vertex_count : int
The number of vertices faces refer to
faces : (n, 3) int
List of vertex indices
face_normals : (n, 3) float
Normal vector for each face
Returns
-----------
vertex_normals : (vertex_count, 3) float
Normals for every vertex
Vertices unreferenced by faces will be zero.
"""
def summed_sparse():
# use a sparse matrix of which face contains each vertex to
# figure out the summed normal at each vertex
# allow cached sparse matrix to be passed
if 'sparse' in kwargs:
sparse = kwargs['sparse']
else:
sparse = index_sparse(vertex_count, faces)
summed = sparse.dot(face_normals)
return summed
def summed_loop():
# loop through every face, in tests was ~50x slower than
# doing this with a sparse matrix
summed = np.zeros((vertex_count, 3))
for face, normal in zip(faces, face_normals):
summed[face] += normal
return summed
try:
summed = summed_sparse()
except BaseException:
log.warning(
'unable to generate sparse matrix! Falling back!',
exc_info=True)
summed = summed_loop()
# invalid normals will be returned as zero
vertex_normals = util.unitize(summed)
return vertex_normals | 767214b5c2ba701de5288009ee4ebfb90378446c | 20,001 |
def get_beam_jobs():
"""Returns the list of all registered Apache Beam jobs.
Returns:
list(BeamJob). The list of registered Apache Beam jobs.
"""
return [beam_job_domain.BeamJob(j) for j in jobs_registry.get_all_jobs()] | 24e22d487fdbdb02917011e94a6d5b985de67640 | 20,002 |
def z_norm(dataset, max_seq_len=50):
"""Normalize data in the dataset."""
processed = {}
text = dataset['text'][:, :max_seq_len, :]
vision = dataset['vision'][:, :max_seq_len, :]
audio = dataset['audio'][:, :max_seq_len, :]
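    # Z-normalise each sample along the time axis, independently for every modality.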
for ind in range(dataset["text"].shape[0]):
vision[ind] = np.nan_to_num(
(vision[ind] - vision[ind].mean(0, keepdims=True)) / (np.std(vision[ind], axis=0, keepdims=True)))
audio[ind] = np.nan_to_num(
(audio[ind] - audio[ind].mean(0, keepdims=True)) / (np.std(audio[ind], axis=0, keepdims=True)))
text[ind] = np.nan_to_num(
(text[ind] - text[ind].mean(0, keepdims=True)) / (np.std(text[ind], axis=0, keepdims=True)))
processed['vision'] = vision
processed['audio'] = audio
processed['text'] = text
processed['labels'] = dataset['labels']
return processed | 8cf40069b2a8c042d357fab3b1e3aaf13c15c69e | 20,003 |
def days_upto(year):
"""
Return the number of days from the beginning of the test period to the
beginning of the year specified
"""
return sum([days_in_year(y) for y in range(2000,year)]) | f87295a53d839e2ce895ef5fe5490b77377d28eb | 20,004 |
def choose_field_size():
    """A function that asks for a field size and builds the playing field."""
    while True:
        print('Пожалуйста, задайте размер поля (число от 3 до 5):')
        try:
            field_size = int(input())
        except ValueError:
            continue
        if field_size not in (3, 4, 5):
            continue
        # Build the row labels ('a', 'b', ...), the column numbers and an empty field.
        rows_name = [chr(ord('a') + i) for i in range(field_size)]
        rows = {name: i + 1 for i, name in enumerate(rows_name)}
        columns = list(range(1, field_size + 1))
        field = [[[' '] for _ in range(field_size)] for _ in range(field_size)]
        # Print the empty board.
        print('\nПоле для игры:\n')
        print('  ' + '  '.join(str(col) for col in columns) + '\n')
        for row_num in range(field_size):
            print(rows_name[row_num], sep='', end='')
            for cell in field[row_num]:
                print(cell[0], '|', sep='', end='')
            print('\n ' + '-' * (2 * field_size + 8), sep='', end='')
            print('\n')
        break
    return field, rows, columns | ed370aca1f13a9f93bb96e483885c67e1bd30317 | 20,006 |
def delete_submission_change(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
db = get_db()
db.execute('DELETE FROM submission_change WHERE id = ?', (id,))
db.commit()
return jsonify(status='ok') | a886dfd89939ca10d95877bd16bda313ccb9353d | 20,007 |
from datetime import datetime
def get_search_response(db, search_term):
"""Method to get search result from db or google api.
Args:
db: The database object.
search_term: The search term.
Returns:
String: List of relevant links separated by line break.
"""
# Find if the search results for the term is stored in mongo.
response = (
db["SearchResults"].find_one(
{
"searchTerm": search_term
}
) or {}
).get("result")
if not response:
# Fetch search results from Google API if not found in mongo.
response = get_google_search_response(search_term)
# Cache the results in mongo where lastSearchedOn is a TTL index with timeout of 3600 seconds.
db["SearchResults"].insert_one(
{
"searchTerm": search_term,
"lastSearchedOn": datetime.now(),
"result": response
}
)
return response | 11180ad1ee57d2a778439fd260d5201d3723cfe7 | 20,008 |
def lattice_2d_rescale_wave_profile(kfit, X, dT, Z_C, Y_C, v, dx=1.):
"""
Fit the wave profile (X, dT) to the ODE solution (X_C, dT_C)
"""
# recenter the profile around 0
k0 = np.argmax(dT)
x0 = X[k0]
Z = kfit*(X.copy()-x0)
# retain a window corresponding to the input ODE solution
zlo = max(np.min(Z_C), np.min(Z))
zhi = min(np.max(Z_C), np.max(Z))
idx = (Z >= zlo) & (Z <= zhi)
Z = Z[idx]
Y = dT.copy()[idx]
if (len(Z) > len(Z_C)):
raise ValueError("Increase resolution of ODE solution!")
# rescale Y
Y /= (v*kfit/2.)
return Z, Y | 8caea59a092efd9632e4b705400f40aa0ebbec44 | 20,009 |
def extent_switch_ijk_kji(
extent_in: npt.NDArray[np.int_]) -> npt.NDArray[np.int_]: # reverse order of elements in extent
"""Returns equivalent grid extent switched either way between simulator and python protocols."""
dims = extent_in.size
result = np.zeros(dims, dtype = 'int')
for d in range(dims):
result[d] = extent_in[dims - d - 1]
return result | c960591fa1f3b31bd0877fd75845a17fff8eff50 | 20,010 |
def create_data(f, x_vals):
"""Assumes f is a function of one argument
x_vals is an array of suitable arguments for f
Returns array containing results of applying f to the
elements of x_vals"""
    y_vals = []
    for x in x_vals:
        y_vals.append(f(x))
return np.array(y_vals) | 5f74402586c3f7d02c8d6146d5256dbccdf49e81 | 20,011 |
def register():
    """Register a new user."""
    req_dict = request.get_json()
    phone = req_dict.get("phone")
    password = req_dict.get("password")
    password2 = req_dict.get("password2")
    sms_code = req_dict.get("sms_code")
    phone = str(phone)
    sms_code = str(sms_code)
    # Validate parameters
    if not all([phone, password, password2, sms_code]):
        return jsonify(code=400, msg="参数不完整")
    if password != password2:
        return jsonify(code=400, msg="两次密码不一致")
    # Fetch the real SMS verification code from redis
    try:
        real_sms_code = redis_store.get("sms_code_%s" % phone)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(code=4001, msg="读取真实短信验证码异常")
    # Check whether the SMS verification code has expired
    if real_sms_code is None:
        return jsonify(code=4002, msg="短信验证码失效")
    # Delete the SMS code from redis to prevent it from being reused
    try:
        redis_store.delete("sms_code_%s" % phone)
    except Exception as e:
        current_app.logger.error(e)
    # Check that the SMS code entered by the user is correct
    if real_sms_code != sms_code:
        return jsonify(code=4003, msg="短信验证码错误")
    # Check whether the phone number is already registered
    try:
        user = User.query.filter_by(phone=phone).first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(code=400, msg="数据库异常")
    else:
        if user is not None:
            # The phone number is already registered
            return jsonify(code=400, msg="手机已被注册")
    # Save the new user's registration data to the database
    avatar = constant.ADMIN_AVATAR_URL  # user avatar
    user = User(username=phone, phone=phone, password=password, avatar=avatar)
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        current_app.logger.error(e)
        return jsonify(code=400, msg="查询数据库异常")
    # Save the login state in the session
    session["username"] = phone
    session["phone"] = phone
    session["user_id"] = user.id
    session["avatar"] = user.avatar
    # Return the result
    return jsonify(code=200, msg="注册成功") | f9e6bc8dc30cb967843d0f47fada1b4b62c6b130 | 20,012 |
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
    The newer version is preferred over c++11 (when it is available).
"""
flags = ["-std=c++17", "-std=c++14", "-std=c++11"]
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError(
"Unsupported compiler -- at least C++11 support is needed!"
) | a21a0a8efcad62cc26ff033877c366d7d6acf09d | 20,013 |
def to_null(string):
"""
Usage::
{{ string|to_null}}
"""
return 'null' if string is None else string | 1868ca2c7474a8134f2dbb0b0e542ca659bf4940 | 20,014 |
import warnings
def get_mtime(path, mustExist=True):
"""
Get mtime of a path, even if it is inside a zipfile
"""
warnings.warn("Don't use this function", DeprecationWarning)
try:
return zipio.getmtime(path)
except IOError:
if not mustExist:
return -1
raise | 6c661fb5d7a874a8173ec509b07401e2120da95b | 20,015 |
def get_table_8():
"""表 8 主たる居室の照明区画݅に設置された照明設備の調光による補正係数
Args:
Returns:
list: 表 8 主たる居室の照明区画݅に設置された照明設備の調光による補正係数
"""
table_8 = [
(0.9, 1.0),
(0.9, 1.0),
(1.0, 1.0)
]
return table_8 | 89470f0242982755104dbb2afe0198e2f5afa5f4 | 20,016 |
import requests
import warnings
def query_epmc(query):
"""
Parameters
----------
query :
Returns
-------
"""
url = "https://www.ebi.ac.uk/europepmc/webservices/rest/search?query="
page_term = "&pageSize=999" ## Usual limit is 25
request_url = url + query + page_term
r = requests.get(request_url)
if r.status_code == 200:
return r
else:
warnings.warn("request to " + str(query) + " has failed to return 200, and has returned " + str(r.status_code))
pass | a8da1ee3253d51738f1d556548f6bccf17b32b53 | 20,017 |
def is_default_array_type(f, type_map=TYPE_MAP):
"""
Check whether the field is an array and is made up of default types, e.g. u8 or s16.
"""
return f.type_id == 'array' and type_map.get(f.options['fill'].value, None) | ec0e7a26261cc72e473d2c365bc452b2eeab396f | 20,018 |
def delete_police_station_collection():
"""
Helper function to delete station collection in db.
"""
result = PoliceStation.objects().delete()
return result | 7b3cc89269695fa494eb12a7b904fabd1974f3d8 | 20,019 |
def compute_asvspoof_tDCF(
asv_target_scores,
asv_nontarget_scores,
asv_spoof_scores,
cm_bonafide_scores,
cm_spoof_scores,
cost_model,
):
"""
Compute t-DCF curve as in ASVSpoof2019 competition:
Fix ASV threshold to EER point and compute t-DCF curve over thresholds in CM.
Code for this is mainly taken from the ASVSpoof2019 competition t-DCF implementation:
https://www.asvspoof.org/
Parameters:
asv_target_scores (ndarray): Array of ASV target (bonafide) scores (should be high)
asv_nontarget_scores (ndarray): Array of ASV nontarget (bonafide) scores (should be low)
asv_spoof_scores (ndarray): Array of ASV spoof scores (should be low)
cm_bonafide_scores (ndarray): Array of CM target (bonafide) scores (should be high)
cm_spoof_scores (ndarray): Array of CM nontarget (spoof) scores (should be low)
cost_model (CostParameters): CostParameters object containing cost parameters
Returns:
tdcf_curve (ndarray): Array of normalized t-DCF values at different CM thresholds
cm_thresholds (ndarray): Array of different CM thresholds, corresponding to
values in tdcf_curve.
"""
# Fix ASV FAR and miss to values at EER (with legit samples)
asv_frr, asv_far, asv_thresholds = compute_det(asv_target_scores, asv_nontarget_scores)
asv_frr_eer, asv_far_eer, asv_eer_threshold = compute_eer(asv_frr, asv_far, asv_thresholds)
p_asv_miss = asv_frr_eer
p_asv_fa = asv_far_eer
# Fraction of spoof samples that were rejected by asv.
# Note that speaker labels are not used here, just raw number
# of spoof samples rejected by asv in general
p_asv_spoof_miss = np.sum(asv_spoof_scores < asv_eer_threshold) / len(asv_spoof_scores)
# Copy/pasta from t-DCF implementation in ASVSpoof2019 competition
# Obtain miss and false alarm rates of CM
p_cm_miss, p_cm_fa, cm_thresholds = compute_det(cm_bonafide_scores, cm_spoof_scores)
# See ASVSpoof2019 evaluation plan for more information on these
C1 = cost_model.p_tar * (cost_model.c_cm_miss - cost_model.c_asv_miss * p_asv_miss) - \
cost_model.p_nontar * cost_model.c_asv_fa * p_asv_fa
# Cost for CM false-accept:
# How often we have spoof samples *
# Cost of accepting a spoof *
# how often ASV accepts spoof
C2 = cost_model.c_cm_fa * cost_model.p_spoof * (1 - p_asv_spoof_miss)
# Obtain t-DCF curve for all thresholds
tDCF = C1 * p_cm_miss + C2 * p_cm_fa
# Normalized t-DCF
tDCF_norm = tDCF
if min(C1, C2) == 0:
tDCF_norm = tDCF
else:
tDCF_norm = tDCF / np.minimum(C1, C2)
return tDCF_norm, cm_thresholds | 27819737d7a1a84db10d78cce4c5edd16548e774 | 20,020 |
def all_divisor(n, includeN=True):
"""
>>> all_divisor(28)
[1, 2, 4, 7, 14, 28]
>>> all_divisor(28, includeN=False)
[1, 2, 4, 7, 14]
Derived from https://qiita.com/LorseKudos/items/9eb560494862c8b4eb56
"""
lower_divisors, upper_divisors = [], []
i = 1
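    # Scan i up to sqrt(n); each divisor i found pairs with the co-divisor n // i (skipped when they coincide).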
while i * i <= n:
if n % i == 0:
lower_divisors.append(i)
if i != n // i:
upper_divisors.append(n//i)
i += 1
upper_divisors = upper_divisors[::-1]
if not includeN:
upper_divisors.pop()
return lower_divisors + upper_divisors | 2fa0eb58eac30030cfbbfdcce62bc91cb36f218e | 20,021 |
def isTrue(value, noneIsFalse=True):
"""
Returns True if <value> is one of the valid string representations for True.
By default, None is considered False.
"""
if not value:
if noneIsFalse:
return False
else:
return None
else:
return value.lower() in TRUE_STRINGS | 16e69dc43ef2034d210803e8cc3ebf2ae13e13b2 | 20,022 |
import pickle
def read_doc_labels(input_dir):
"""
:param input_dir:
:return: doc labels
"""
with open(input_dir + "doc_labels.pkl", 'rb') as fin:
labels = pickle.load(fin)
return labels | c0246f8e09441782a7437177877cc1e4d83ecb40 | 20,023 |
def search(catalog_number):
"""
A top level `catalog_number` search that returns a list of result dicts.
Usually catalog numbers are unique but not always hence the returned list.
"""
results = query(catalog_number)
result_list = []
for result in results:
dict_result = vars(result)["data"]
result_list.append(result_filter(dict_result))
return result_list | 2a8ce325250cbaa5a9f307ef707c74c5101d84d3 | 20,025 |
def calculate_mean_probas(time_ser, model):
"""Calculate the metric to evaluate based on average probabilities
Args:
time_ser (np.ndarray): dynophore time series
model (HMM): Fitted HMM
Returns:
        np.float: Probability of predicting the given time series based on the
        fitted model
"""
probas = model.predict_proba(time_ser)
states = model.predict(time_ser)
prob_ser = np.zeros(probas.shape)
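    # Keep only the probability assigned to the state predicted at each time step; all other entries stay zero.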
for i in range(len(states)):
prob_ser[i, states[i]] = probas[i, states[i]]
return np.mean(np.mean(prob_ser, axis=0)) | b8320a24c01e56c89b5d706630190a118d803ffa | 20,027 |
def compute_t(i, automata_list, target_events):
"""
Compute alphabet needed for processing L{automata_list}[i-1] in the
sequential abstraction procedure.
@param i: Number of the automaton in the L{automata_list}
@type i: C{int} in range(1, len(automata_list)+1)
@param automata_list: List of automata
@type automata_list: C{list} of L{Automaton}
@param target_events: List of events to preserve after abstraction
@type target_events: C{set} of L{Event}
@return: New alphabet for the next step in sequential abstraction
@rtype: C{set} of L{Event}
"""
processed = set()
for j in range(0, i):
processed = processed.union(automata_list[j].alphabet)
unprocessed = target_events.copy()
for j in range(i, len(automata_list)):
unprocessed = unprocessed.union(automata_list[j].alphabet)
result = processed.intersection(unprocessed)
processed.clear()
unprocessed.clear()
return result | 88fc64aaf917d23a29e9400cf29705e6b20665c3 | 20,028 |
def cal_min_sim(y):
"""Calculate the minimal value given multiple trajectories from different isomers"""
y = y.copy()
if len(y.shape) == 2: # add one more dimension if only two provided
y = y[np.newaxis, :]
n_sim, nT, nP = y.shape
y_min_sim = np.min(y, axis = 0)
return y_min_sim | efbee1f3d8a88ac447609019a431d3ac6469f2cf | 20,029 |
async def create_rsa_key(
hub,
ctx,
name,
vault_url,
key_ops=None,
enabled=None,
expires_on=None,
not_before=None,
tags=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Create a new RSA key or, if name is already in use, create a new version of the key. Requires the keys/create
permission. Key properties can be specified as keyword arguments.
:param name: The name of the new key. Key names can only contain alphanumeric characters and dashes.
:param vault_url: The URL of the vault that the client will access.
:param key_ops: A list of permitted key operations. Possible values include: 'decrypt', 'encrypt', 'sign',
'unwrap_key', 'verify', 'wrap_key'.
:param enabled: Whether the key is enabled for use.
:param expires_on: When the key will expire, in UTC. This parameter must be a string representation of a Datetime
object in ISO-8601 format.
:param not_before: The time before which the key can not be used, in UTC. This parameter must be a string
representation of a Datetime object in ISO-8601 format.
:param tags: Application specific metadata in the form of key-value pairs.
CLI Example:
.. code-block:: bash
azurerm.keyvault.key.create_rsa_key test_name test_vault
"""
result = {}
kconn = await hub.exec.azurerm.keyvault.key.get_key_client(ctx, vault_url, **kwargs)
try:
key = kconn.create_rsa_key(
name=name,
key_operations=key_ops,
enabled=enabled,
expires_on=expires_on,
not_before=not_before,
tags=tags,
)
result = _key_as_dict(key)
except (KeyVaultErrorException, ValidationError, HttpResponseError) as exc:
result = {"error": str(exc)}
return result | f22a520bc82bed0447440a80639a1f6ef575e718 | 20,030 |
import re
def _parse_size_string(size):
"""
Parse a capacity string.
Takes a string representing a capacity and returns the size in bytes, as an
integer. Accepts strings such as "5", "5B", "5g", "5GB", " 5 GiB ", etc.
Case insensitive. See `man virsh` for more details.
:param size: The size string to parse.
:returns: The number of bytes represented by `size`, as an integer.
"""
# Base values for units.
BIN = 1024
DEC = 1000
POWERS = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4}
# If an integer is passed, treat it as a string without units.
size = str(size).lower()
regex = r"\s*(\d+)\s*([%s])?(i?b)?\s*$" % "".join(POWERS.keys())
match = re.compile(regex).match(size)
if not match:
msg = "The size string '%s' is not of a valid format." % size
raise AnsibleFilterError(to_text(msg))
number = match.group(1)
power = match.group(2)
unit = match.group(3)
if not power:
power = ""
if unit == "b":
base = DEC
else:
base = BIN
return int(number) * (base ** POWERS[power]) | 6ad10ba10380eaa7a8acd6bbeb52b537fdcf3864 | 20,031 |
def get_exp_lr(base_lr, xs, power=4e-10):
"""Get learning rates for each step."""
ys = []
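    # Learning rate decays as base_lr * exp(-power * x**2).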
for x in xs:
ys.append(base_lr / np.exp(power*x**2))
return ys | 58486de08742d2467a4178d1ac0544c0d1f2055c | 20,033 |
import time
def dashboard():
""" Main dashboard function. Run stats across all accounts. """
start = time.time()
instance_count = 0
user_count = 0
sg_count = 0
elb_count = 0
aws_accounts = AwsAccounts()
accounts = aws_accounts.all()
pool = Pool(10)
results = pool.map(get_account_stats, accounts)
pool.close()
pool.join()
for acc_result in results:
instance_count += acc_result['InstanceCount']
user_count += acc_result['UserCount']
sg_count += acc_result['SecurityGroupCount']
elb_count += acc_result['ELBCount']
end = time.time()
result = dict(
Time=(end - start),
Summary=dict(
AccountsCount=len(accounts),
InstanceCount=instance_count,
UserCount=user_count,
SecurityGroupCount=sg_count,
ELBCount=elb_count))
return result | e5138d8527ecb5712db6205757432d31efde8f2b | 20,034 |
def strip_new_line(str_json):
"""
Strip \n new line
:param str_json: string
:return: string
"""
str_json = str_json.replace('\n', '') # kill new line breaks caused by triple quoted raw strings
return str_json | f2faaa80dca000586a32a37cdf3dff793c0a2d9b | 20,035 |
def fromAtoB(x1, y1, x2, y2, color='k', connectionstyle="arc3,rad=-0.4",
shrinkA=10, shrinkB=10, arrowstyle="fancy", ax=None):
"""
Draws an arrow from point A=(x1,y1) to point B=(x2,y2) on the (optional)
axis ``ax``.
.. note::
See matplotlib documentation.
"""
if ax is None:
return pl.annotate("",
xy=(x2, y2), xycoords='data',
xytext=(x1, y1), textcoords='data',
arrowprops=dict(
arrowstyle=arrowstyle, # linestyle="dashed",
color=color,
shrinkA=shrinkA, shrinkB=shrinkB,
patchA=None,
patchB=None,
connectionstyle=connectionstyle),
)
else:
return ax.annotate("",
xy=(x2, y2), xycoords='data',
xytext=(x1, y1), textcoords='data',
arrowprops=dict(
arrowstyle=arrowstyle, # linestyle="dashed",
color=color,
shrinkA=shrinkA, shrinkB=shrinkB,
patchA=None,
patchB=None,
connectionstyle=connectionstyle),
) | a7b14ae62d26f203da0fb3f26c7aa7652fb9a345 | 20,036 |
import torch
def exp(input_):
"""Wrapper of `torch.exp`.
Parameters
----------
input_ : DTensor
Input dense tensor.
"""
return torch.exp(input_._data) | 01449c87486a7145b26d313de7254cb784d94a7b | 20,037 |
import datetime
def get_lat_lon(fp, fs=FS):
"""
get lat lon values for concat dataset
"""
logger.info(f"{str(datetime.datetime.now())} : Retrieving lat lon")
with xr.open_dataset(fs.open(fp)) as ds:
lat, lon = ds["latitude"].values, ds["longitude"].values
logger.info(f"{str(datetime.datetime.now())} : Retrieved lat lon")
return lat, lon | a99614463121edb99c290ddea8d6bb7b298498f1 | 20,039 |
def one_hot_encode(vec, vals=10):
"""
    One-hot encode the 10 possible labels.
"""
n = len(vec)
out = np.zeros((n, vals))
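    # For each row, set the column given by the label in vec to 1.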
out[range(n), vec] = 1
return out | 079c4c505464659248631b3e5c3d1345557d922b | 20,040 |
def COUNTA(*args) -> Function:
"""
Returns a count of the number of values in a dataset.
    Learn more: https://support.google.com/docs/answer/3093991
"""
return Function("COUNTA", args) | c8e876e80a0414eab915b6eb0efc9917b12edb19 | 20,041 |
import json
def decode_url_json_string(json_string):
"""
Load a string representing serialised json into
:param json_string:
:return:
"""
strings = json.loads(h.unescape(json_string),
object_pairs_hook=parse_json_pairs)
return strings | 6f616e5e6037024ebdab6e63aa90c13c60fca40c | 20,042 |
def svn_wc_merge2(*args):
"""
svn_wc_merge2(enum svn_wc_merge_outcome_t merge_outcome, char left,
char right, char merge_target, svn_wc_adm_access_t adm_access,
char left_label, char right_label,
char target_label, svn_boolean_t dry_run,
char diff3_cmd, apr_array_header_t merge_options,
apr_pool_t pool) -> svn_error_t
"""
    return _wc.svn_wc_merge2(*args) | 271e596810b7ee604532f34612e349ae30b108c5 | 20,044 |
import torch
def test_finetuning_callback_warning(tmpdir):
"""Test finetuning callbacks works as expected."""
seed_everything(42)
class FinetuningBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.backbone = nn.Linear(32, 2, bias=False)
self.layer = None
self.backbone.has_been_used = False
def training_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def forward(self, x):
self.backbone.has_been_used = True
x = self.backbone(x)
return x
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
return optimizer
chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)
model = FinetuningBoringModel()
model.validation_step = None
callback = TestBackboneFinetuningWarningCallback(unfreeze_backbone_at_epoch=3, verbose=False)
with pytest.warns(UserWarning, match="Did you init your optimizer in"):
trainer = Trainer(limit_train_batches=1, default_root_dir=tmpdir, callbacks=[callback, chk], max_epochs=2)
trainer.fit(model)
assert model.backbone.has_been_used
trainer = Trainer(max_epochs=3)
trainer.fit(model, ckpt_path=chk.last_model_path) | c7bec2c256ece471b0f3d9f56a74dcb2c7ad186a | 20,045 |
from contextlib import suppress
from functools import wraps
def idempotent(function):
"""Shallows 304 errors, making actions repeatable."""
@wraps(function)
def decorator(*args, **kwargs):
with suppress(GitlabCreateError):
return function(*args, **kwargs)
return decorator | 4012ce715a8344a7a9eb7e27a7d96f0e3b9c8f6d | 20,046 |
def newline_formatter(func):
"""
Wrap a formatter function so a newline is appended if needed to the output
"""
def __wrapped_func(*args, **kwargs):
"""
Wrapper function that appends a newline to result of original function
"""
result = func(*args, **kwargs)
# The result may be a string, or bytes. In python 2 they are the same, but in python 3, they are not.
# First, check for strings as that works the same in python 2 and 3, THEN check for bytes, as that
# implementation is python 3 specific. If it's neither (future proofing), we use a regular new line
line_ending = "\n"
if isinstance(result, str):
line_ending = "\n"
elif isinstance(result, bytes):
# We are redefining the variable type on purpose since python broke backwards compatibility between 2 & 3.
line_ending = b"\n"
# Avoid double line endings
if not result.endswith(line_ending):
result += line_ending
return result
# Return the wrapper
return __wrapped_func | 71af6af25aa93e0e8f80958b5caf5266f598c878 | 20,047 |
from typing import List
from typing import Tuple
def sigma_splitter(float_arr: List[float]) -> Tuple[List[List[int]], List[List[int]], List[List[int]]]:
"""
separates the NCOF score into the 1-3 sigma outliers for the NCOF input
@param float_arr: List[float]
@return: inliers , pos_outliers , neg_outliers: List[List[int]], List[List[int]], List[List[int]]
"""
"calculates the mean and std of the input score"
mean = np.mean(float_arr)
std = np.std(float_arr)
"calculate which indexes that are input inliers"
inliers = np.where(np.logical_and(float_arr >= mean - std, float_arr <= mean + std))
inliers = inliers[0].tolist()
"Calculates the 1-sigma postive outliers"
one_pos_sigma = np.where(np.logical_and(mean + std <= float_arr, float_arr < mean + 2 * std))
"Calculates the 2-sigma postive outliers"
two_pos_sigma = np.where(np.logical_and(mean + 2 * std <= float_arr, float_arr < mean + 3 * std))
"Calculates the 3-sigma postive outliers"
three_pos_sigma = np.where(mean + 3 * std <= float_arr)
"Calculates the 1-sigma negative outliers"
one_neg_sigma = np.where(np.logical_and(mean - 2 * std < float_arr, float_arr <= mean - std))
"Calculates the 2-sigma negative outliers"
two_neg_sigma = np.where(np.logical_and(mean - 3 * std < float_arr, float_arr <= mean - 2 * std))
"Calculates the 3-sigma negative outliers"
three_neg_sigma = np.where(float_arr <= mean - 3 * std)
"stores the positive outliers in a list of lists"
pos_outliers = [one_pos_sigma[0],
two_pos_sigma[0],
three_pos_sigma[0]]
pos_outliers = [l.tolist() for l in pos_outliers]
"stores the negative outliers in a list of lists"
neg_outliers = [one_neg_sigma[0],
two_neg_sigma[0],
three_neg_sigma[0]]
neg_outliers = [l.tolist() for l in neg_outliers]
"OUTPUT: list of indexes"
"inliers: list of all inliers"
"pos_outliers: list of 3 lists that corresponds to the 1,2,3 positive sigma outlers"
"neg_outliers: list of 3 lists that corresponds to the 1,2,3 negative sigma outlers"
return inliers, pos_outliers, neg_outliers | 824c3d11ffa1fb81763cdf815de3e37a7b8aa335 | 20,048 |
import torch
def cosine_beta_schedule(timesteps, s = 0.008, thres = 0.999):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
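    # Squared-cosine cumulative-alpha schedule; betas are recovered from ratios of consecutive values and clipped.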
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, thres) | a1969deafdb282955a53b15978a055d15f0678a0 | 20,049 |
from typing import Callable
from typing import TypeVar
from typing import Optional
from typing import Dict
from typing import Any
import yaml
T = TypeVar("T")
def construct_from_yaml(
constructor: Callable[..., T],
yaml_dict: Optional[Dict[str, Any]] = None,
) -> T:
"""Build ``constructor`` from ``yaml_dict``
Args:
constructor (Callable): The constructor to test (such as an Hparams class)
yaml_dict (Dict[str, Any], optional): The YAML. Defaults to ``None``, which is equivalent
to an empty dictionary.
"""
yaml_dict = {} if yaml_dict is None else yaml_dict
# ensure that yaml_dict is actually a dictionary of only json-serializable objects
yaml_dict = yaml.safe_load(yaml.safe_dump(yaml_dict))
instance = hp.create(constructor, yaml_dict, cli_args=False)
return instance | 5cb92f8af0b0ab49069e88b74b7b10fdf2cc797d | 20,050 |
import tokenize
def text_to_document(text, language="en"):
""" Returns string text as list of Sentences """
splitter = _sentence_splitters[language]
utext = unicode(text, 'utf-8') if isinstance(text, str) else text
sentences = splitter.tokenize(utext)
return [tokenize(text, language) for text in sentences] | 2f196c51a979a2f9a849ebd6f89203c907406789 | 20,051 |
def get_top_playlists_route(type):
"""
An endpoint to retrieve the "top" of a certain demographic of playlists or albums.
This endpoint is useful in generating views like:
- Top playlists
- Top Albums
- Top playlists of a certain mood
- Top playlists of a certain mood from people you follow
Args:
type: (string) The `type` (same as repost/save type) to query from.
limit?: (number) default=16, max=100
mood?: (string) default=None
filter?: (string) Optional filter to include (supports 'followees') default=None
"""
args = to_dict(request.args)
if 'limit' in request.args:
args['limit'] = min(request.args.get('limit', type=int), 100)
else:
args['limit'] = 16
if 'mood' in request.args:
args['mood'] = request.args.get('mood')
else:
args['mood'] = None
if "with_users" in request.args:
args["with_users"] = parse_bool_param(request.args.get("with_users"))
try:
playlists = get_top_playlists(type, args)
return api_helpers.success_response(playlists)
except exceptions.ArgumentError as e:
return api_helpers.error_response(str(e), 400) | 2012e95073291669a6bb881afa853961922160c5 | 20,052 |
import urllib
def parse_host(incomplete_uri: str) -> str:
"""Get netloc/host from incomplete uri."""
# without // it is interpreted as relative
return urllib.parse.urlparse(f"//{incomplete_uri}").netloc | 099284e970756055f2616d484014db210cb04a76 | 20,053 |
import functools
import operator
def inner_by_delta(vec1: Vec, vec2: Vec):
"""Compute the inner product of two vectors by delta.
The two vectors are assumed to be from the same base and have the same
number of indices, or ValueError will be raised.
"""
indices1 = vec1.indices
indices2 = vec2.indices
if vec1.label != vec2.label or len(indices1) != len(indices2):
raise ValueError(
            'Invalid vectors to compute inner product by delta', (vec1, vec2)
)
return functools.reduce(operator.mul, (
KroneckerDelta(i, j) for i, j in zip(indices1, indices2)
), Integer(1)) | 660f7d73e6d73cd4dfdf73d532e90e5a29f38481 | 20,054 |
def remove_mapping(rxn_smi: str, keep_reagents: bool = False) -> str:
"""
Removes all atom mapping from the reaction SMILES string
Parameters
----------
rxn_smi : str
The reaction SMILES string whose atom mapping is to be removed
keep_reagents : bool (Default = False)
whether to keep the reagents in the output reaction SMILES string
Returns
-------
str
The reaction SMILES string with all atom mapping removed
Also see: clean_rxn_smis_50k_one_phase, clean_rxn_smis_FULL_one_phase
"""
rxn = rdChemReactions.ReactionFromSmarts(rxn_smi, useSmiles=True)
if not keep_reagents:
rxn.RemoveAgentTemplates()
prods = [mol for mol in rxn.GetProducts()]
for prod in prods:
for atom in prod.GetAtoms():
if atom.HasProp("molAtomMapNumber"):
atom.ClearProp("molAtomMapNumber")
rcts = [mol for mol in rxn.GetReactants()]
for rct in rcts:
for atom in rct.GetAtoms():
if atom.HasProp("molAtomMapNumber"):
atom.ClearProp("molAtomMapNumber")
return rdChemReactions.ReactionToSmiles(rxn) | fb16648fee136359bc8ef96684824319221a3359 | 20,055 |
def generate_bot_master_get_results_message(message_id, receiving_host, receiving_port):
"""
:rtype : fortrace.net.proto.genericmessage_pb2.GenericMessage
:type receiving_port: int
:type receiving_host: str
:type message_id: long
:param message_id: the id of this message
:param receiving_host: the host that receives the order
:param receiving_port: the host's port
:return: the message to be generated
"""
m = genericmessage_pb2.GenericMessage()
m.message_type = messagetypes_pb2.BOTMASTERGETRESULT
m.message_id = message_id
m.Extensions[botmastermessage_pb2.bm_result].receiving_host = receiving_host
m.Extensions[botmastermessage_pb2.bm_result].receiving_port = receiving_port
assert m.IsInitialized()
return m | 4c46a9c1bf69022092b7df4b48e302a87d2d7b90 | 20,056 |
import fileinput
import datetime
def readLogData(username,level,root='.'):
"""
Extracts key events from a log
"""
filename = getFilename(username,level,extension='log',root=root)
log = []
start = None
for line in fileinput.input(filename):
elements = line.split()
if elements[2] == MESSAGE_TAG:
now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
log.insert(0,{'type': 'message','content': ' '.join(elements[3:]),
'time': now-start})
elif elements[2] == LOCATION_TAG:
now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
index = symbol2index(elements[3],level)
waypoint = WAYPOINTS[level][index]
log.insert(0,{'type': 'location','destination': waypoint['name'],
'buildingNo': index+1,'buildingTotal': len(WAYPOINTS[level]),
'time': now-start})
elif elements[2] == CREATE_TAG:
start = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
log.insert(0,{'type': 'create',
'time': 'Start','start': start})
elif elements[2] == COMPLETE_TAG:
now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S')
log.insert(0,{'type': 'complete','success': elements[3] == 'success',
'time': now-start})
elif elements[2] == USER_TAG:
log[0]['choice'] = elements[3]
log[0]['location'] = WAYPOINTS[level][symbol2index(elements[4],level)]['name']
log[0]['danger'] = elements[5]
log[0]['dead'] = elements[6]
log[0]['image'] = elements[7]
log[0]['content'] = ' '.join(elements[8:])[1:-1]
if ') (' in log[0]['content']:
log[0]['content'],log[0]['ack'] = log[0]['content'].split(') (')
else:
log[0]['ack'] = ''
fileinput.close()
return log | f94c3e715d021b206ef46766fdc0e6051784615e | 20,057 |
def get_type1(pkmn):
"""get_type1(pkmn) returns Type 1 of the Pokémon with the name 'pkmn' """
return __pokemon__[pkmn]['Type 1'] | c4290f695160f2f1962f1dca158359e250a4803a | 20,058 |
def load_json(fname):
"""
Load a JSON file containing a riptide object (or list/dict/composition thereof)
"""
with open(fname, 'r') as f:
return from_json(f.read()) | 93f771ae0ba31974b564e1520412fab5719b08be | 20,059 |
def get_stock_data(symbol, start_date, end_date, source="phisix", format="c"):
"""Returns pricing data for a specified stock and source.
Parameters
----------
symbol : str
Symbol of the stock in the PSE or Yahoo.
You can refer to these links:
PHISIX: https://www.pesobility.com/stock
YAHOO: https://www.nasdaq.com/market-activity/stocks/screener?exchange=nasdaq
start_date : str
Starting date (YYYY-MM-DD) of the period that you want to get data on
end_date : str
Ending date (YYYY-MM-DD) of the period you want to get data on
source : str
        First source to query from ("phisix", "yahoo").
If the stock is not found in the first source,
the query is run on the other source.
format : str
Format of the output data
Returns
-------
pandas.DataFrame
Stock data (in the specified `format`) for the specified company and date range
"""
df_columns = [DATA_FORMAT_COLS[c] for c in format]
if source == "phisix":
# The query is run on 'phisix', but if the symbol isn't found, the same query is run on 'yahoo'.
df = get_pse_data(symbol, start_date, end_date, format=format)
if df is None:
df = get_yahoo_data(symbol, start_date, end_date)
elif source == "yahoo":
# The query is run on 'yahoo', but if the symbol isn't found, the same query is run on 'phisix'.
df = get_yahoo_data(symbol, start_date, end_date)
if df is None:
df = get_pse_data(symbol, start_date, end_date)
else:
raise Exception("Source must be either 'phisix' or 'yahoo'")
missing_columns = [col for col in df_columns if col not in df.columns]
# Fill missing columns with np.nan
for missing_column in missing_columns:
df[missing_column] = np.nan
if len(missing_columns) > 0:
print("Missing columns filled w/ NaN:", missing_columns)
return df[df_columns] | 94171c950198f0975c4232f232ec9be93bd3f2a3 | 20,060 |
def extract_borderless(result) -> list:
"""
extracts borderless masks from result
Args:
result:
Returns: a list of the borderless tables. Each array describes a borderless table bounding box.
the two coordinates in the array are the top right and bottom left coordinates of the bounding box.
"""
result_borderless = []
for r in result[0][2]:
if r[4] > .85:
            # keep only detections whose confidence score exceeds the threshold
result_borderless.append(r[:4].astype(int))
return result_borderless | e81844a5deb553bf8d7380ebec8a76fec219ee72 | 20,061 |
import itertools
def get_frequent_length_k_itemsets(transactions, min_support=0.2, k=1, frequent_sub_itemsets=None):
"""Returns all the length-k itemsets, from the transactions, that satisfy
min_support.
Parameters
----------
transactions : list of list
min_support : float, optional
From 0.0 to 1.0. Percentage of transactions that should contain an
itemset for it to be considered frequent.
k : int, optional
Length that the frequent itemsets should be
frequent_sub_itemsets : frozenset of frozenset, optional
Facilitates candidate pruning by the Apriori property. Length-k itemset
candidates that aren't supersets of at least 1 frequent sub-itemset are
pruned.
Returns
-------
list of frozenset
list of float
"""
if min_support <= 0 or min_support > 1:
raise ValueError('min_support must be greater than 0 and less than or equal to 1.0')
if k <= 0:
raise ValueError('k must be greater than 0')
all_items = set()
if frequent_sub_itemsets:
for sub_itemset in frequent_sub_itemsets:
all_items = all_items.union(sub_itemset)
else:
for transaction in transactions:
all_items = all_items.union(transaction)
all_length_k_itemsets = itertools.product(all_items, repeat=k)
all_length_k_itemsets = frozenset(frozenset(itemset) for itemset in all_length_k_itemsets)
all_length_k_itemsets = frozenset(filter(lambda itemset: len(itemset) == k, all_length_k_itemsets))
# Remove itemsets that don't have a frequent sub-itemset to take advantage
# of the Apriori property
pruned_length_k_itemsets = all_length_k_itemsets
if frequent_sub_itemsets:
pruned_length_k_itemsets = set()
for itemset in all_length_k_itemsets:
has_frequent_sub_itemset = False
for sub_itemset in frequent_sub_itemsets:
if sub_itemset.issubset(itemset):
has_frequent_sub_itemset = True
if has_frequent_sub_itemset:
pruned_length_k_itemsets.add(itemset)
frequent_itemsets = []
frequent_supports = []
supports = support(transactions, pruned_length_k_itemsets)
for itemset, itemset_support in supports.items():
if itemset_support >= min_support:
frequent_itemsets.append(itemset)
frequent_supports.append(itemset_support)
return frequent_itemsets, frequent_supports | a293b48c62ebbafda7fa89abb6792f04c4ff1371 | 20,062 |
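A small worked example, assuming the companion support() helper called inside the function is importable alongside it:

transactions = [
    ["bread", "milk"],
    ["bread", "diapers", "beer"],
    ["milk", "diapers", "beer"],
    ["bread", "milk", "diapers"],
]
# Frequent 1-itemsets: items appearing in at least half of the transactions
l1, l1_supports = get_frequent_length_k_itemsets(transactions, min_support=0.5, k=1)
# Frequent 2-itemsets, pruned via the Apriori property using the frequent 1-itemsets
l2, l2_supports = get_frequent_length_k_itemsets(
    transactions, min_support=0.5, k=2, frequent_sub_itemsets=frozenset(l1))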
import types
def create_news_markup():
"""
    Creates the reply keyboard for the movie news section
:return: telebot.types.ReplyKeyboardMarkup
"""
news_markup = types.ReplyKeyboardMarkup()
news_markup.row(Commands.GET_BACK_COMMAND)
return news_markup | 654ec227d07fe914c795931f48ce634e0a4a6fc3 | 20,063 |
def _generate_description_from(command, name, description):
"""
Generates description from the command and it's optionally given description. If both `description` and
`command.__doc__` is missing, defaults to `name`.
Parameters
----------
command : `None` or `callable`
The command's function.
name : `str` or `None`
The command's name, if name defaulting should be applied.
description : `Any`
The command's description.
Returns
-------
description : `str`
The generated description.
Raises
------
ValueError
If `description` length is out of range [2:100].
"""
while True:
if (description is not None) or isinstance(description, str):
break
if command is not None:
description = getattr(command, '__doc__', None)
if (description is not None) and isinstance(description, str):
break
if name is not None:
description = name
break
return
description = normalize_description(description)
if description is None:
description_length = 0
else:
description_length = len(description)
if (
description_length < APPLICATION_COMMAND_DESCRIPTION_LENGTH_MIN
or description_length > APPLICATION_COMMAND_DESCRIPTION_LENGTH_MAX
):
raise ValueError(
f'`description` length is out of the expected range '
f'[{APPLICATION_COMMAND_DESCRIPTION_LENGTH_MIN}:{APPLICATION_COMMAND_DESCRIPTION_LENGTH_MAX}], got '
f'{description_length!r}; {description!r}.'
)
return description | e2f782f7e74635b3c50273b36b837e48d7999f4f | 20,065 |
def uses_na_format(station: str) -> bool:
"""
Returns True if the station uses the North American format,
False if the International format
"""
if station[0] in NA_REGIONS:
return True
elif station[0] in IN_REGIONS:
return False
elif station[:2] in M_NA_REGIONS:
return True
elif station[:2] in M_IN_REGIONS:
return False
raise BadStation("Station doesn't start with a recognized character set") | b3158a85ae9b1ba45ebeb3de27491650d7f4c4c8 | 20,066 |
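Illustrative calls, assuming NA_REGIONS / IN_REGIONS hold the usual ICAO prefix sets (e.g. 'K' for the contiguous US, 'E' for northern Europe):

print(uses_na_format("KJFK"))  # True  -> North American report format
print(uses_na_format("EGLL"))  # False -> international report format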
def openFile(prompt,key = "r",defaulttype = None, defaultname = None):
"""
Method to open a text file with sanity checking, optional defaults and reprompt on failure.
This is the main used callable function to open files.
:param prompt: the prompt to be displayed
:type prompt: str
:param key: the key passed to open, default is "r" (read)
:type key: str
    :param defaulttype: the default extension which will be added if not supplied (defaults to None)
    :type defaulttype: str
    :param defaultname: the default filename (defaults to None)
    :type defaultname: str
    :return: the opened file descriptor.
    The file name is processed to expand environment variables and user names,
    so for example $ENV/dir/file.data or ~user/dir/file.data are expanded
"""
while True:
filename = getFilename(prompt,defaulttype,defaultname) # Get the filename
try:
filestream = open(filename,str(key)) # try and open
return filestream
except IOError:
logger.error("Failed to open file '{0:s}' with key '{1:s}'".\
format(filename,str(key))) | e9985872c0beb15eaa5bafa543eefb01f5fd8413 | 20,067 |
def dsphere(n=100, d=2, r=1, noise=None, ambient=None):
"""
Sample `n` data points on a d-sphere.
Parameters
-----------
    n : int
        Number of data points in shape.
    d : int
        Intrinsic dimension of the sphere; points are sampled in R^(d+1).
    r : float
        Radius of sphere.
    noise : float, default=None
        Standard deviation of Gaussian noise added to each point, if given.
    ambient : int, default=None
        Embed the sphere into a space with ambient dimension equal to `ambient`. The sphere is randomly rotated in this high dimensional space.
"""
data = np.random.randn(n, d+1)
# Normalize points to the sphere
data = r * data / np.sqrt(np.sum(data**2, 1)[:, None])
if noise:
data += noise * np.random.randn(*data.shape)
if ambient:
assert ambient > d, "Must embed in higher dimensions"
data = embed(data, ambient)
return data | 8957a328c2025fbdb3741b004f2fb3825f19e4d9 | 20,068 |
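A quick sanity check; numpy is imported here because the function body above refers to it as np:

import numpy as np

pts = dsphere(n=500, d=2, r=1)                      # 500 points on the unit 2-sphere in R^3
print(pts.shape)                                    # (500, 3)
print(np.allclose(np.linalg.norm(pts, axis=1), 1))  # True, since no noise was added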
def topological_sort_by_down(start_nodes=None, all_nodes=None):
"""
Topological sort method by down stream direction.
    Only one of 'start_nodes' and 'all_nodes' needs to be given.
Args:
start_nodes (list[NodeGraphQt.BaseNode]):
(Optional) the start update nodes of the graph.
all_nodes (list[NodeGraphQt.BaseNode]):
(Optional) if 'start_nodes' is None the function can calculate
start nodes from 'all_nodes'.
Returns:
list[NodeGraphQt.BaseNode]: sorted nodes.
"""
if not start_nodes and not all_nodes:
return []
if start_nodes:
start_nodes = __remove_BackdropNode(start_nodes)
if all_nodes:
all_nodes = __remove_BackdropNode(all_nodes)
if not start_nodes:
start_nodes = [n for n in all_nodes if not _has_input_node(n)]
if not start_nodes:
return []
if not [n for n in start_nodes if _has_output_node(n)]:
return start_nodes
graph = _build_down_stream_graph(start_nodes)
return _sort_nodes(graph, start_nodes, True) | 22a36d4f8225ae2978459796f115059e2bbb8d62 | 20,069 |
from datetime import datetime
def parseYear(year, patterns):
""""This function returns a string representing a year based on the input and a list of possible patterns.
>>> parseYear('2021', ['%Y'])
'2021'
>>> parseYear('2021', ['(%Y)', '%Y'])
'2021'
>>> parseYear('(2021)', ['%Y', '(%Y)'])
'2021'
"""
parsedYear = None
for p in patterns:
try:
tmp = datetime.strptime(year, p).date().year
parsedYear = str(tmp)
break
except ValueError:
pass
    if parsedYear is None:
return year
else:
return parsedYear | 743378c868a2439f721e428f676092f9da0a2e7a | 20,072 |
def fit_oxy_nii(target_row,
velocity_column = None,
data_column = None,
IP = "center",
**kwargs):
"""
Fits oxygen bright line to spectrum for future subtraction
Parameters
----------
target_row: `SkySurvey` row
Row to match spectra to
data_column: 'str', optional, must be keyword
Name of data column, default of "DATA"
velocity_column: 'str', optional, must be keyword
Name of velocity column, default of "VELOCITY"
**kwargs: dict
keywords passed to Model.fit()
"""
if velocity_column is None:
velocity_column = "VELOCITY_GEO"
if data_column is None:
data_column = "DATA"
def bright_atm(x, baseline, amp, mean, std):
g = c_component(amp, mean, std, IP = IP)
y = np.zeros_like(x)
y+= baseline
y+= g(x)
return y
bright_atm_model = Model(bright_atm)
params = Parameters()
params.add("baseline", value = np.nanmin(target_row[data_column]))
params.add("amp", value = np.nanmax(target_row[data_column]))
params.add("mean", value = -281.3)
params.add("std", value = 3)
exclusion_mask = (target_row[velocity_column] < -315) | (target_row[velocity_column] > -215)
res = bright_atm_model.fit(target_row[data_column][np.invert(exclusion_mask)],
x = target_row[velocity_column][np.invert(exclusion_mask)],
params = params,
nan_policy = "omit",
**kwargs)
return res | 1251bf102abaec690fc97117c2409e2f5e89f35b | 20,073 |
def image_reproject_from_healpix_to_file(source_image_hdu, target_image_hdu_header, filepath=None):
""" reproject from healpix image to normal wcs image
:param source_image_hdu: the HDU object of source image (healpix)
:param target_image_hdu_header: the HDU object of target image (wcs)
:param filepath: the output file path
:return: array, footprint
"""
array, footprint = reproject_from_healpix(source_image_hdu, target_image_hdu_header)
if filepath is not None:
# write file
fits.writeto(filepath, array, target_image_hdu_header, clobber=True) # clobber=OVERWRITE
else:
# return array & footprint
return array, footprint | b261663f18ccdf095c0b6e20c02d2ebc0282b713 | 20,075 |
def flux_reddening_wl(wl, flux_wl, ebv, Rv=None, law=LawFitz, mode=ReddeningLaw.MW):
"""
Apply extinction curves to flux(lambda) values
:param wl: [A]
:param flux_wl: [ergs s^-1 cm^-2 A^-1]
:param ebv: E(B-V)
:param Rv: R_V
:param law: the variant of extinction curves
:param mode: type of extinction curves (MW, LMC, SMC)
:return: reddening flux
"""
if Rv is None:
Rv = law.Rv[mode]
A_lambda = law.Almd(wl, ebv, Rv=Rv)
res = flux_wl * 10 ** (-0.4 * A_lambda)
return res | 668d1824d988989a3411c798614aeb1bc6a63cb6 | 20,076 |
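A worked check of the dimming factor alone, without calling the function (LawFitz/ReddeningLaw live elsewhere in the module): one magnitude of extinction scales the flux by 10**(-0.4) ≈ 0.398.

import numpy as np

A_lambda = np.array([0.5, 1.0, 2.0])  # assumed extinction values in magnitudes
print(10 ** (-0.4 * A_lambda))        # ~[0.631, 0.398, 0.158]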
import string
from random import choice
def genRandomString( size: int = 5, upper: bool = False, lower: bool = False, mix: bool = False, numbers: bool = True) -> str:
"""
Generates a random string of the given size and content.
:param numbers: Numbers are included in the string. Default True.
:param upper: Uppercase only. Default False.
    :param lower: Lowercase only. Default False.
    :param mix: Mix lowercase and uppercase. Default False.
:param size: Size of the desired string.
:return: String
"""
chars = ''
if upper:
chars = string.ascii_uppercase
elif lower:
chars = string.ascii_lowercase
elif mix:
chars = string.ascii_letters
if numbers:
chars = chars + string.digits
return ''.join(choice(chars) for _ in range(size)) | a63a2be76675bbb42da2a4cd0ae20db8be723ee3 | 20,077 |
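Example usage (output characters are random by construction):

print(genRandomString(size=8, upper=True))               # e.g. 'A7K2Q9ZD' (uppercase letters + digits)
print(genRandomString(size=6, mix=True, numbers=False))  # e.g. 'qTgRwa'   (mixed-case letters only)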
def process_whole_image(model, images, num_crops=4, receptive_field=61, padding=None):
"""Slice images into num_crops * num_crops pieces, and use the model to
process each small image.
Args:
model: model that will process each small image
images: numpy array that is too big for model.predict(images)
num_crops: number of slices for the x and y axis to create sub-images
receptive_field: receptive field used by model, required to pad images
padding: type of padding for input images, one of {'reflect', 'zero'}
Returns:
model_output: numpy array containing model outputs for each sub-image
"""
if K.image_data_format() == 'channels_first':
channel_axis = 1
row_axis = len(images.shape) - 2
col_axis = len(images.shape) - 1
else:
channel_axis = len(images.shape) - 1
row_axis = len(images.shape) - 3
col_axis = len(images.shape) - 2
if not padding:
padding_layers = get_padding_layers(model)
if padding_layers:
padding = 'reflect' if 'reflect' in padding_layers[0] else 'zero'
if str(padding).lower() not in {'reflect', 'zero'}:
raise ValueError('Expected `padding_mode` to be either `zero` or '
'`reflect`. Got ', padding)
# Split the frames into quarters, as the full image size is too large
crop_x = images.shape[row_axis] // num_crops
crop_y = images.shape[col_axis] // num_crops
# Set up receptive field window for padding
win_x, win_y = (receptive_field - 1) // 2, (receptive_field - 1) // 2
# instantiate matrix for model output
model_output_shape = tuple(list(model.layers[-1].output_shape)[1:])
if channel_axis == 1:
output = np.zeros((images.shape[0], model_output_shape[1], *images.shape[2:]))
else:
output = np.zeros((*images.shape[0:-1], model_output_shape[-1]))
expected_input_shape = get_cropped_input_shape(images, num_crops, receptive_field)
if expected_input_shape != model.input_shape[1:]:
raise ValueError('Expected model.input_shape to be {}. Got {}. Use '
'`get_cropped_input_shape()` to recreate your model '
' with the proper input_shape'.format(
expected_input_shape, model.input_shape[1:]))
# pad the images only in the x and y axes
pad_width = []
for i in range(len(images.shape)):
if i == row_axis:
pad_width.append((win_x, win_x))
elif i == col_axis:
pad_width.append((win_y, win_y))
else:
pad_width.append((0, 0))
if str(padding).lower() == 'reflect':
padded_images = np.pad(images, pad_width, mode='reflect')
else:
padded_images = np.pad(images, pad_width, mode='constant', constant_values=0)
for i in range(num_crops):
for j in range(num_crops):
e, f = i * crop_x, (i + 1) * crop_x + 2 * win_x
g, h = j * crop_y, (j + 1) * crop_y + 2 * win_y
if images.ndim == 5:
if channel_axis == 1:
predicted = model.predict(padded_images[:, :, :, e:f, g:h])
else:
predicted = model.predict(padded_images[:, :, e:f, g:h, :])
else:
if channel_axis == 1:
predicted = model.predict(padded_images[:, :, e:f, g:h])
else:
predicted = model.predict(padded_images[:, e:f, g:h, :])
# if using skip_connections, get the final model output
if isinstance(predicted, list):
predicted = predicted[-1]
# if the model uses padding, trim the output images to proper shape
# if model does not use padding, images should already be correct
if padding:
predicted = trim_padding(predicted, win_x, win_y)
a, b = i * crop_x, (i + 1) * crop_x
c, d = j * crop_y, (j + 1) * crop_y
if images.ndim == 5:
if channel_axis == 1:
output[:, :, :, a:b, c:d] = predicted
else:
output[:, :, a:b, c:d, :] = predicted
else:
if channel_axis == 1:
output[:, :, a:b, c:d] = predicted
else:
output[:, a:b, c:d, :] = predicted
return output | 3e9ab9485662f9bae40217c60837d8d8cba020d3 | 20,078 |
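A minimal numpy-only sketch of the tiling arithmetic above (the model and the get_cropped_input_shape/trim_padding helpers are defined elsewhere): each crop of size (crop_x, crop_y) is read from the padded image with a receptive-field halo of win pixels on every side.

import numpy as np

images = np.zeros((1, 256, 256, 1))     # one 256x256 single-channel image, channels_last
num_crops, receptive_field = 4, 61
crop_x = images.shape[1] // num_crops   # 64
crop_y = images.shape[2] // num_crops   # 64
win = (receptive_field - 1) // 2        # 30
padded = np.pad(images, ((0, 0), (win, win), (win, win), (0, 0)), mode='reflect')
tile = padded[:, 0:crop_x + 2 * win, 0:crop_y + 2 * win, :]
print(tile.shape)                       # (1, 124, 124, 1) -> spatial size the model must accept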
from scipy.spatial.distance import cdist, pdist, squareform

def compute_covariance(model, xy, XY=None):
"""Returns the covariance matrix for a given set of data"""
if xy.size == 1:
dist = 0
elif XY is None:
dist = squareform(pdist(xy))
else:
dist = cdist(xy, XY)
C = model(dist)
return C | b898ef57155898c75797033e057c6cab4e2487bc | 20,079 |
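A minimal sketch with an assumed exponential covariance model; model can be any callable mapping distance to covariance, and the scipy names mirror the ones the function itself relies on:

import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform

def exp_model(d):
    return np.exp(-d / 2.0)  # covariance decays with distance

xy = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
C = compute_covariance(exp_model, xy)                                   # 3x3 symmetric, ones on the diagonal
C_cross = compute_covariance(exp_model, xy, XY=np.array([[0.5, 0.5]]))  # 3x1 cross-covariance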
import sqlite3
import pandas as pd
def prob1(cur: sqlite3.Cursor) -> pd.DataFrame:
"""List how many stops are in the database.
Parameters
----------
cur (sqlite3.Cursor) : The cursor for the database we're accessing.
Returns
-------
(pd.DataFrame) : Table with the solution.
"""
cur.execute("SELECT COUNT(*) FROM stops;")
return pd.DataFrame(cur.fetchall()) | ed6a3a316e89177a6224fd7513ca5c098940e312 | 20,080 |
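A minimal sketch against an assumed in-memory database; the real stops table comes from whatever transit data the exercise loads elsewhere:

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE stops (stop_id TEXT, stop_name TEXT);")
cur.executemany("INSERT INTO stops VALUES (?, ?);", [("1", "A St"), ("2", "B Ave")])
print(prob1(cur))  # a 1x1 DataFrame containing 2
conn.close()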