content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def matrix_zeros(m, n, **options):
""""Get a zeros matrix for a given format."""
format = options.get('format', 'sympy')
dtype = options.get('dtype', 'float64')
spmatrix = options.get('spmatrix', 'csr')
if format == 'sympy':
return zeros(m, n)
elif format == 'numpy':
return _numpy_zeros(m, n, **options)
elif format == 'scipy.sparse':
return _scipy_sparse_zeros(m, n, **options)
raise NotImplementedError('Invalid format: %r' % format) | e4c87a85dd6a37868704205b21732d82a4ffb2df | 3,900 |
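A minimal usage sketch for matrix_zeros, assuming `zeros` is sympy.zeros and that the numpy/scipy helpers exist elsewhere in the module:
m = matrix_zeros(2, 3)                      # default 'sympy' format: equivalent to sympy.zeros(2, 3)
# matrix_zeros(2, 3, format='numpy') and format='scipy.sparse' dispatch to the
# _numpy_zeros / _scipy_sparse_zeros helpers, which are not shown in this snippet.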
def make_password(password, salt=None):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(
UNUSABLE_PASSWORD_SUFFIX_LENGTH)
if not isinstance(password, (bytes, str)):
raise TypeError(
'Password must be a string or bytes, got %s.'
% type(password).__qualname__
)
hasher = PBKDF2PasswordHasher()
salt = salt or hasher.salt()
return hasher.encode(password, salt) | 6c39486c2eb88af278580cdf4b86b7b45489eef0 | 3,901 |
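A hedged usage sketch: this snippet mirrors Django's helper, so the equivalent public API (assuming a configured Django project) behaves like this:
from django.contrib.auth.hashers import check_password, make_password

hashed = make_password("s3cret")         # e.g. "pbkdf2_sha256$<iterations>$<salt>$<hash>"
assert check_password("s3cret", hashed)  # verify a login attempt against the stored hash
unusable = make_password(None)           # unusable marker; no password can ever match it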
from typing import Optional
from typing import TextIO
from typing import Type
import csv
from pathlib import Path
import sys
def get_dialect(
filename: str, filehandle: Optional[TextIO] = None
) -> Type[csv.Dialect]:
"""Try to guess dialect based on file name or contents."""
dialect: Type[csv.Dialect] = csv.excel_tab
file_path = Path(filename)
if file_path.suffix == ".txt":
pass
elif file_path.suffix == ".csv":
if filehandle:
dialect = csv.Sniffer().sniff(filehandle.read(4 * 1024))
filehandle.seek(0)
else:
sys.stderr.write("Error: File does not have the ending csv or txt.\n")
sys.exit(2)
return dialect | 91d21e5bb321e7deb1e4b8db445d5c51d8138456 | 3,902 |
from typing import Optional
from typing import Any
import os
def load_object(primary_path: str, file_name: Optional[str] = None, module: Optional[str] = "pickle") -> Any:
"""
This is a generic function to load any given
object using different `module`s, e.g. pickle,
dill, and yaml.
Note: See `get_file_path()` for details on how
to set `primary_path` and `file_name`.
"""
file_path = get_file_path(primary_path, file_name)
logger.info(f"Loading '{file_path}'...")
if os.path.isfile(file_path):
if module == "yaml":
obj = load_yaml(file_path)
else:
obj = load_pickle(file_path, module)
logger.info(f"Successfully loaded '{file_path}'.")
return obj
else:
raise FileNotFoundError(f"Could not find '{file_path}'.") | e6f8e423637ae8a26b623d754b9a7ae3699ef6f5 | 3,903 |
from typing import Union
from pathlib import Path
from typing import Tuple
import torch
from typing import Optional
from typing import Callable
import numpy as np
import torchaudio
import torchaudio.transforms as T
def compute_spectrogram(
audio: Union[Path, Tuple[torch.Tensor, int]],
n_fft: int,
win_length: Optional[int],
hop_length: int,
n_mels: int,
mel: bool,
time_window: Optional[Tuple[int, int]],
**kwargs,
) -> torch.Tensor:
"""
Get the spectrogram of an audio file.
Args:
audio: Path of the audio file or a (waveform, sample_rate) tuple.
n_fft:
win_length:
hop_length:
n_mels:
mel: If True, compute a mel spectrogram.
time_window: A tuple of two time values such we get the sliced spectrogram w.r.t. that window.
kwargs:
"""
# See if we have to deal with an audio file or (waveform, sample rate).
if isinstance(audio, Path):
waveform, sample_rate = torchaudio.load(audio, format="ogg")
elif isinstance(audio[0], torch.Tensor) and isinstance(audio[1], int):
waveform = audio[0]
sample_rate = audio[1]
else:
raise Exception(
"Input audio worng, it must be either a path to an audio file or a (waveform, sample rate) tuple."
)
spectrogram: Callable
if not mel:
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
)
else:
# Mel Spectrogram transform.
spectrogram = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
norm="slaney",
onesided=True,
n_mels=n_mels,
mel_scale="htk",
)
if time_window:
# We convert the time window from seconds to frames.
start, end = np.asarray(time_window) * sample_rate
waveform = waveform[:, start:end]
return spectrogram(waveform) | 918fc0c9273b2085ded2ca8d6dd5d4db758538f0 | 3,904 |
def decode_html_dir(new):
""" konvertiert bestimmte Spalte in HTML-Entities """
def decode(key):
return decode_html(unicode(new[key]))
if new.has_key('title') and new['title'].find('&') >= 0:
new['title'] = decode('title')
if new.has_key('sub_title') and new['sub_title'].find('&') >= 0:
new['sub_title'] = decode('sub_title')
if new.has_key('text') and new['text'].find('&') >= 0:
new['text'] = decode('text')
if new.has_key('text_more') and new['text_more'].find('&') >= 0:
new['text_more'] = decode('text_more')
if new.has_key('sections') and new['sections'].find('&') >= 0:
new['sections'] = decode('sections')
if new.has_key('section') and new['section'].find('&') >= 0:
new['section'] = decode('section')
if new.has_key('anti_spam_question'):
new['anti_spam_question'] = decode('anti_spam_question')
return new | 029483974a26befc2df8d92babf53f5a32be31f5 | 3,905 |
def apply_hash(h, key):
"""
Apply a hash function to the key.
This function is a wrapper for xxhash functions with initialized seeds.
Currently assume h is a xxhash.x32 object with initialized seed
If we change choice of hash function later, it will be easier to change
how we apply the hash (either through a function or an object) in this method
Parameters
----------
h : hash function to apply
key : key to hash
Returns
-------
val : int
The hash value of the hashed key.
"""
h.update(key)
val = h.intdigest() # TODO: What representation to return? (hex in str format?)
h.reset()
return val | e79ce4fdbb6f6c09b6115b35e619894b67ce991a | 3,906 |
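A short usage sketch for apply_hash, assuming the xxhash package; the seeded hasher is created once and reused across keys, as the docstring describes:
import xxhash

h = xxhash.xxh32(seed=42)      # seeded hasher shared across calls
v1 = apply_hash(h, b"alice")   # update -> intdigest -> reset
v2 = apply_hash(h, b"bob")     # the reset() keeps each call independent of the last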
def dmsp_enz_deg(
c,
t,
alpha,
vmax,
vmax_32,
kappa_32,
k
):
"""
Function that computes dD32_dt and dD34_dt of DMSP
Parameters
----------
c: float.
Concentration of DMSP in nM.
t: int
Integration time in min.
alpha: float.
Alpha for cleavage by DddP from this study.
vmax: float.
Vmax for cleavage by DddP, calculated from the K M that the enzyme should have to
exhibit the pattern of d34S DMSP vs. time, in nM/min/nM enzyme
Vmax_d: float.
km: float.
K M that the enzyme should have to exhibit the pattern of d34S DMSP vs. time, in nM.
k: float.
Degradation rate of the enzyme, in min^-1.
Returns
-------
The dD32_dt and dD34_dt of DMSP
"""
# Unpack isotopes
enzyme, dmsp_34, dmsp_32 = c
#Calculate vmax_34 assuming that Vmax total = Vmax_32 + Vmax_34
#This assumption would only hold true at saturation
vmax_34 = vmax-vmax_32
#Determination of kappa 32 from kappa 34 and the fractionation factor
kappa_34 = kappa_32 * alpha
# Calculate dD34_dt
dD34_dt = - ((kappa_34 * enzyme * (vmax_34 * enzyme * dmsp_34/((vmax_34 * enzyme)+(kappa_34 * enzyme * dmsp_34)))))
# Calculate dD32_dt
dD32_dt = - ((kappa_32 * enzyme * (vmax_32 * enzyme * dmsp_32/((vmax_32 * enzyme)+(kappa_32 * enzyme * dmsp_32)))))
#Calculate dE_dt
dE_dt = -k*enzyme
return [dE_dt, dD34_dt, dD32_dt] | d5e4b77523ab469b61eec106a28e1e3143644bf7 | 3,907 |
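A hedged sketch of integrating dmsp_enz_deg with SciPy's odeint; all parameter values below are illustrative placeholders, not values from the study:
import numpy as np
from scipy.integrate import odeint

params = (0.99, 10.0, 5.0, 0.01, 0.001)   # placeholder (alpha, vmax, vmax_32, kappa_32, k)
c0 = [1.0, 100.0, 100.0]                  # initial [enzyme, dmsp_34, dmsp_32] in nM
t = np.linspace(0, 60, 200)               # minutes
# odeint calls dmsp_enz_deg(c, t, *params), matching the signature above.
solution = odeint(dmsp_enz_deg, c0, t, args=params)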
import matplotlib.pyplot as plt
import numpy as np
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
df_holdings_by_month = df_holdings.resample('1M').mean()
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax | 5e375729aa48d0d3f8aada17268048a68a662421 | 3,908 |
import os
from subprocess import CalledProcessError, check_call
def get_systemd_run_args(available_memory):
"""
Figure out if we're on system with cgroups v2, or not, and return
appropriate systemd-run args.
If we don't have v2, we'll need to be root, unfortunately.
"""
args = [
"systemd-run",
"--uid",
str(os.geteuid()),
"--gid",
str(os.getegid()),
"-p",
f"MemoryLimit={available_memory // 2}B",
]
try:
check_call(args + ["--user", "printf", "hello"])
args += ["--user", "--scope"]
except CalledProcessError:
# cgroups v1 doesn't do --user :(
args = ["sudo", "--preserve-env=PATH"] + args + ["-t", "--same-dir"]
return args | f872286bf6759e26e24a5331db108ccee8f89605 | 3,909 |
def concatenation_sum(n: int) -> int:
"""
Algo:
1. Find length of num (n), i.e. number of digits 'd'.
2. Determine largest number with 'd - 1' digits => L = 10^(d - 1) - 1
3. Find diff => f = n - L
4. Now, the sum => s1 = f * d, gives us the number of digits in the string formed by all 'd'-digit numbers
less than or equal to 'n'.
5. Now, iteratively calculate and sum ((10^(d-i) - 10^(d-i-1)) * (d-i)) for i ∈ [1, d)
6. This will determine the number of digits in the string formed by all 'd-1', 'd-2', and so on -digits numbers.
:param n: Max number
:return: Number of digits in the string, formed by concatenating all the numbers from 1 to n.
"""
d = len(str(n))
L = 10**(d - 1) - 1
f = n - L
s1 = f * d
s2 = get_numdigs_sum_upto(d - 1)
return s1 + s2 | 644c994ee9b5af280feb233a40df51b519c4b9c6 | 3,910 |
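A worked check of the algorithm in the docstring, assuming get_numdigs_sum_upto(d - 1) returns the digit count contributed by all numbers with fewer than d digits:
# For n = 13: d = 2, L = 9, f = 4, so s1 = 4 * 2 = 8 digits from 10..13
# and s2 = 9 digits from 1..9, giving 17 in total.
assert len("".join(str(i) for i in range(1, 14))) == 17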
def make_join_conditional(key_columns: KeyColumns, left_alias: str, right_alias: str) -> Composed:
"""
Turn a pair of aliases and a list of key columns into a SQL safe string containing
join conditionals ANDed together.
s.id1 is not distinct from d.id1 and s.id2 is not distinct from d.id2
"""
composed_aliases = {"left_alias": Identifier(left_alias), "right_alias": Identifier(right_alias)}
template = "{left_alias}.{column} {equality} {right_alias}.{column}"
composed_conditionals = [
SQL(template).format(
column=Identifier(c.name),
equality=SQL("=" if c.not_nullable else "is not distinct from"),
**composed_aliases,
)
for c in key_columns
]
return SQL(" and ").join(composed_conditionals) | c0b239598f606f35d3af0cbf8c34168137e05b9c | 3,911 |
def home():
""" Home interface """
return '''<!doctype html>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<body style="margin:0;font-family:sans-serif;color:white">
<form method="POST" action="analyse" enctype="multipart/form-data">
<label style="text-align:center;position:fixed;top:0;bottom:0;width:100%;background-position:center;background-size:cover;background-image:url(https://blog.even3.com.br/wp-content/uploads/2019/04/saiba-como-e-por-que-fazer-crachas-para-eventos-1.png)">
<br /><br />
<h1>Cara-crachá</h1>
<h3 id="processing" style="display:none">Processando...</h3>
<input type="file" name="file" onchange="processing.style.display='block';this.form.submit()" style="display:none" />
</label>
</form>
</body>
''' | d8a9c3449ac56b04ee1514729342ce29469c5c2f | 3,912 |
def _enable_mixed_precision_graph_rewrite_base(opt, loss_scale,
use_v1_behavior):
"""Enables mixed precision. See `enable_mixed_precision_graph_rewrite`."""
opt = _wrap_optimizer(opt, loss_scale, use_v1_behavior=use_v1_behavior)
config.set_optimizer_experimental_options({'auto_mixed_precision': True})
return opt | 8601ae6d24575e2bf5a7057bc06992088d473179 | 3,913 |
import argparse
def get_args() -> ProgramArgs:
"""
utility method that handles the argument parsing via argparse
:return: the result of using argparse to parse the command line arguments
"""
parser = argparse.ArgumentParser(
description="simple assembler/compiler for making it easier to write SHENZHEN.IO programs"
)
parser.add_argument(
'input', type=argparse.FileType(),
help="the input file to ingest"
)
parser.add_argument(
'-o', '--output',
help='the output file path', default='out.asm'
)
parser.add_argument(
'-c', '--chip', choices=shenasm.chips.list_names(), default=shenasm.chips.CHIP_TYPE_MC6000,
help='inform assembler of target chip for better diagnostics'
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help='flag to cause more verbose output during execution'
)
parser.add_argument(
'--dotfile', type=str, default=None,
help='write a graphviz compatible .dot file containing the intermediate representation graph of the input'
)
return parser.parse_args() | d47d0fd4da6fb263bbd12848d3888b435596c092 | 3,914 |
def selection_criteria_1(users, label_of_interest):
"""
Formula for Retirement/Selection score:
x = sum_{i=1..n}(r_i) - sum_{j=1..m}(r_j).
Where first summation contains reliability scores of users who have labeled it as the same
as the label of interest, second summation contains reliability scores of users who have
labeled it differently
Args:
users (list): List of users where each element is a tuple of the form (uid, ulabel,
f1 score)
label_of_interest (int): Label under consideration (left hand summation of formula)
Returns (int): 1 = select the subject id, 0 = don't select
"""
left_sum, right_sum = 0, 0
threshold = 2.0
for user in users:
uid, ulabel, f1_score = user
if ulabel == label_of_interest:
left_sum += f1_score
else:
right_sum += f1_score
if left_sum - right_sum >= threshold:
return 1
else:
return 0 | 8255fd3645d5b50c43006d2124d06577e3ac8f2d | 3,915 |
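A small worked example of the retirement/selection formula with made-up reliability scores:
# Two users agree with label 1 (f1 = 0.9 and 0.8), one disagrees (f1 = 0.4):
users = [(1, 1, 0.9), (2, 1, 0.8), (3, 0, 0.4)]
# left_sum - right_sum = 1.7 - 0.4 = 1.3 < threshold 2.0, so the subject is not selected.
assert selection_criteria_1(users, label_of_interest=1) == 0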
import bs4
import requests
from typing import cast
def get_default_product_not_found(product_category_id: str) -> str:
"""Get default product.
When invalid options are provided, the default product is returned, which happens to be unflavoured whey at 2.2 lbs.
This is PRODUCT_INFORMATION.
"""
response = requests.get(f'https://us.myprotein.com/{product_category_id}.variations')
response.raise_for_status()
dom = bs4.BeautifulSoup(response.text, 'html.parser')
# data-child-id is the attribute that contains the canonical product id
product_id_node = dom.find(attrs={'data-child-id': True})
if not product_id_node:
err_msg = f'Could not get data to resolve options to product id. Url: {response.url}'
raise ValueError(err_msg)
return cast(str, product_id_node['data-child-id']) | 4464a56de2ff514a71d5d06b1684f04a9ed8e564 | 3,916 |
import re
def book_number_from_path(book_path: str) -> float:
"""
Parses the book number from a directory string.
Novellas will have a floating point value like "1.1" which indicates that it was the first novella
to be published between book 1 and book 2.
:param book_path: path of the currently parsed book
:return: book number
"""
num = int(re.findall(r'[0-9]{2}', book_path)[-1])
return num / 10 | 087cb0b8cd0c48c003175a05ed0d7bb14ad99ac3 | 3,917 |
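A quick check of the numbering scheme described in the docstring:
assert book_number_from_path("/series/11 - First Novella/") == 1.1   # directory "11" -> book 1.1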
def intervals_split_merge(list_lab_intervals):
"""
Merge a list of boundary intervals
e.g.
['(2,5]', '(5,7]'] merges into '(2,7]'
Parameters:
----------
list_lab_intervals: list, list of interval boundary strings
Returns:
-------
label_merge: the merged interval
"""
list_labels = []
# Iterate over each interval and collect the left/right boundary strings into a list
for lab in list_lab_intervals:
for s in lab.split(','):
list_labels.append(s.replace('(', '').replace(')', '').replace(']', ''))
list_lab_vals = [float(lab) for lab in list_labels]
# Get the indices of the maximum and minimum values
id_max_val = list_lab_vals.index(max(list_lab_vals))
id_min_val = list_lab_vals.index(min(list_lab_vals))
# Get the strings of the maximum and minimum values
lab_max_interval = list_labels[id_max_val]
lab_min_interval = list_labels[id_min_val]
# If the right boundary value is +Inf, use ')', otherwise ']'
l_label = '('
if lab_max_interval == '+Inf':
r_label = ')'
else:
r_label = ']'
label_merge = l_label + lab_min_interval + ',' + lab_max_interval + r_label
return label_merge | a9e99ec6fc51efb78a4884206a72f7f4ad129dd4 | 3,918 |
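A quick usage check matching the docstring example:
assert intervals_split_merge(['(2,5]', '(5,7]']) == '(2,7]'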
def antique(bins, bin_method=BinMethod.category):
"""CARTOColors Antique qualitative scheme"""
return scheme('Antique', bins, bin_method) | 718ca4c2b9efede292bb5e8e1eb5128e6200a454 | 3,919 |
import json
import boto3
import botocore.config
def do_request(batch_no, req):
"""execute one request. tail the logs. wait for completion"""
tmp_src = _s3_split_url(req['input'])
cpy_dst = _s3_split_url(req['output'])
new_req = {
"src_bucket": tmp_src[0],
"src_key": tmp_src[1],
"dst_bucket": cpy_dst[0],
"dst_key": cpy_dst[1],
"digests": req["digests"]
}
delete_mismatch = req.get('delete_mismatch', False)
log.info("REQ%s data-rehash request: %s", batch_no, json.dumps(new_req, sort_keys=True, indent=4, separators=(",", ": ")))
code, response = lambdas.invoke_sync(lambdas.DATA_REHASH, Payload=new_req)
data = response['Payload'].read().decode("ascii")
if code != 0:
raise Exception("data-rehash failed to complete: %s" % (data,))
data_obj = json.loads(data)
if data_obj.get('error', None):
if "mismatch" in data_obj['error']:
session = boto3.session.Session()
s3 = session.client('s3', config=botocore.config.Config(read_timeout=300, retries={'max_attempts': 0}))
log.info("REQ%s deleting mismatchfile: Bucket=%s Key=%s", batch_no, tmp_src[0], tmp_src[1])
try:
s3.delete_object(Bucket=tmp_src[0], Key=tmp_src[1])
except Exception as delete_exc:
log.error("REQ%s delete failed", exc_info=delete_exc)
raise Exception("data-rehash returned an error: %s" % (data_obj,))
return data_obj | 6e4b8591abfe8a1c106a0ede1e6aa3f6712afd4a | 3,920 |
import sys
import numpy as np
import pyBigWig
def bigwig_tss_targets(wig_file, tss_list, seq_coords, pool_width=1):
""" Read gene target values from a bigwig
Args:
wig_file: Bigwig filename
tss_list: list of TSS instances
seq_coords: list of (chrom,start,end) sequence coordinates
pool_width: average pool adjacent nucleotides of this width
Returns:
tss_targets:
"""
# initialize target values
tss_targets = np.zeros(len(tss_list), dtype="float16")
# open wig
wig_in = pyBigWig.open(wig_file)
# warn about missing chromosomes just once
warned_chroms = set()
# for each TSS
for tss_i in range(len(tss_list)):
tss = tss_list[tss_i]
# extract sequence coordinates
seq_chrom, seq_start, seq_end = seq_coords[tss.gene_seq]
# determine bin coordinates
tss_bin = (tss.pos - seq_start) // pool_width
bin_start = seq_start + tss_bin * pool_width
bin_end = bin_start + pool_width
# pull values
try:
tss_targets[tss_i] = np.array(
wig_in.values(seq_chrom, bin_start, bin_end), dtype="float32"
).sum()
except RuntimeError:
if seq_chrom not in warned_chroms:
print(
"WARNING: %s doesn't see %s (%s:%d-%d). Setting to all zeros. No additional warnings will be offered for %s"
% (
wig_file,
tss.identifier,
seq_chrom,
seq_start,
seq_end,
seq_chrom,
),
file=sys.stderr,
)
warned_chroms.add(seq_chrom)
# check NaN
if np.isnan(tss_targets[tss_i]):
print(
"WARNING: %s (%s:%d-%d) pulled NaN from %s. Setting to zero."
% (tss.identifier, seq_chrom, seq_start, seq_end, wig_file),
file=sys.stderr,
)
tss_targets[tss_i] = 0
# close wig file
wig_in.close()
return tss_targets | 23e2ffb41e86ff4de72a239bd59841b37025a9ed | 3,921 |
def _robot_barcode(event: Message) -> str:
"""Extracts a robot barcode from an event message.
Args:
event (Message): The event
Returns:
str: robot barcode
"""
return str(
next(
subject["friendly_name"] # type: ignore
for subject in event.message["event"]["subjects"] # type: ignore
if subject["role_type"] == "robot" # type: ignore
)
) | 5ffb6567ebb103fc534390d13876d9c1fa956169 | 3,922 |
def build_dist(srcdir, destdir='.', build_type='bdist_egg'):
"""
Builds a distribution using the specified source directory and places
it in the specified destination directory.
srcdir: str
Source directory for the distribution to be built.
destdir: str
Directory where the built distribution file will be placed.
build_type: str
The type of distribution to be built. Default is 'bdist_egg'.
"""
startdir = os.getcwd()
destdir = os.path.abspath(os.path.expanduser(destdir)).replace('\\','/')
srcdir = os.path.abspath(os.path.expanduser(srcdir)).replace('\\','/')
setupname = os.path.join(srcdir, 'setup.py')
if not has_setuptools():
setupname = make_new_setupfile(setupname)
dirfiles = set(os.listdir(destdir))
print "building distribution in %s" % srcdir
cmd = [sys.executable.replace('\\','/'),
os.path.basename(setupname),
]
cmd.extend(build_type.split(' '))
cmd.extend(['-d', destdir])
os.chdir(srcdir)
# FIXME: fabric barfs when running this remotely due to some unicode
# output that it can't handle, so we first save the output to
# a file with unicode stripped out
out = codecs.open('_build_.out', 'wb',
encoding='ascii', errors='replace')
print 'running command: %s' % ' '.join(cmd)
try:
p = subprocess.Popen(' '.join(cmd),
stdout=out, stderr=subprocess.STDOUT,
shell=True)
p.wait()
finally:
out.close()
with open('_build_.out', 'r') as f:
print f.read()
os.chdir(startdir)
newfiles = set(os.listdir(destdir)) - dirfiles
if len(newfiles) != 1:
raise RuntimeError("expected one new file in in destination directory but found %s" %
list(newfiles))
if p.returncode != 0:
raise RuntimeError("problem building distribution in %s. (return code = %s)" %
(srcdir, p.returncode))
distfile = os.path.join(destdir, newfiles.pop())
print 'new distribution file is %s' % distfile
return distfile | bd5ac5cbffb88a3ff0de3cf54f615ef6696273a8 | 3,923 |
from typing import List
from typing import Union
def check_thirteen_fd(fds: List[Union[BI, FakeBI]]) -> str:
"""识别十三段形态
:param fds: list
由远及近的十三段形态
:return: str
"""
v = Signals.Other.value
if len(fds) != 13:
return v
direction = fds[-1].direction
fd1, fd2, fd3, fd4, fd5, fd6, fd7, fd8, fd9, fd10, fd11, fd12, fd13 = fds
max_high = max([x.high for x in fds])
min_low = min([x.low for x in fds])
if direction == Direction.Down:
if min_low == fd13.low and max_high == fd1.high:
# aAbBc-style bottom divergence: fd2-fd6 form A, fd8-fd12 form B
if min(fd2.high, fd4.high, fd6.high) > max(fd2.low, fd4.low, fd6.low) > fd8.high \
and min(fd8.high, fd10.high, fd12.high) > max(fd8.low, fd10.low, fd12.low) \
and min(fd2.low, fd4.low, fd6.low) > max(fd8.high, fd10.high, fd12.high) \
and fd13.power < fd7.power:
v = Signals.LA0.value
# ABC-style bottom divergence, A5B3C5
if fd5.low < min(fd1.low, fd3.low) and fd9.high > max(fd11.high, fd13.high) \
and fd8.high > fd6.low and fd1.high - fd5.low > fd9.high - fd13.low:
v = Signals.LA0.value
if fd13.power < max(fd11.power, fd9.power):
v = Signals.LB0.value
# ABC-style bottom divergence, A3B5C5
if fd3.low < min(fd1.low, fd5.low) and fd9.high > max(fd11.high, fd13.high) \
and min(fd4.high, fd6.high, fd8.high) > max(fd4.low, fd6.low, fd8.low) \
and fd1.high - fd3.low > fd9.high - fd13.low:
v = Signals.LA0.value
if fd13.power < max(fd11.power, fd9.power):
v = Signals.LB0.value
# ABC-style bottom divergence, A5B5C3
if fd5.low < min(fd1.low, fd3.low) and fd11.high > max(fd9.high, fd13.high) \
and min(fd6.high, fd8.high, fd10.high) > max(fd6.low, fd8.low, fd10.low) \
and fd1.high - fd5.low > fd11.high - fd13.low:
v = Signals.LA0.value
if fd13.power < fd11.power:
v = Signals.LB0.value
elif direction == Direction.Up:
if max_high == fd13.high and min_low == fd1.low:
# aAbBC-style top divergence: fd2-fd6 form A, fd8-fd12 form B
if fd8.low > min(fd2.high, fd4.high, fd6.high) >= max(fd2.low, fd4.low, fd6.low) \
and min(fd8.high, fd10.high, fd12.high) >= max(fd8.low, fd10.low, fd12.low) \
and max(fd2.high, fd4.high, fd6.high) < min(fd8.low, fd10.low, fd12.low) \
and fd13.power < fd7.power:
v = Signals.SA0.value
# ABC-style top divergence, A5B3C5
if fd5.high > max(fd3.high, fd1.high) and fd9.low < min(fd11.low, fd13.low) \
and fd8.low < fd6.high and fd5.high - fd1.low > fd13.high - fd9.low:
v = Signals.SA0.value
# Top divergence inside C, forming a double top divergence
if fd13.power < max(fd11.power, fd9.power):
v = Signals.SB0.value
# ABC-style top divergence, A3B5C5
if fd3.high > max(fd5.high, fd1.high) and fd9.low < min(fd11.low, fd13.low) \
and min(fd4.high, fd6.high, fd8.high) > max(fd4.low, fd6.low, fd8.low) \
and fd3.high - fd1.low > fd13.high - fd9.low:
v = Signals.SA0.value
# Top divergence inside C, forming a double top divergence
if fd13.power < max(fd11.power, fd9.power):
v = Signals.SB0.value
# ABC-style top divergence, A5B5C3
if fd5.high > max(fd3.high, fd1.high) and fd11.low < min(fd9.low, fd13.low) \
and min(fd6.high, fd8.high, fd10.high) > max(fd6.low, fd8.low, fd10.low) \
and fd5.high - fd1.low > fd13.high - fd11.low:
v = Signals.SA0.value
# Top divergence inside C, forming a double top divergence
if fd13.power < fd11.power:
v = Signals.SB0.value
else:
raise ValueError("direction 的取值错误")
return v | 95c308c2560cc7a337e4a1719836c3df74ab1bbe | 3,924 |
from typing import List
def set_process_tracking(template: str, channels: List[str]) -> str:
"""This function replaces the template placeholder for the process tracking with the correct process tracking.
Args:
template: The template to be modified.
channels: The list of channels to be used.
Returns:
The modified template.
"""
tracking = ""
for channel in channels:
tracking += " ULong64_t {ch}_processed = 0;\n".format(ch=channel)
tracking += " std::mutex {ch}_bar_mutex;\n".format(ch=channel)
tracking += " auto c_{ch} = {ch}_df_final.Count();\n".format(ch=channel)
tracking += " c_{ch}.OnPartialResultSlot(quantile, [&{ch}_bar_mutex, &{ch}_processed, &quantile](unsigned int /*slot*/, ULong64_t /*_c*/) {{".format(
ch=channel
)
tracking += (
"\n std::lock_guard<std::mutex> lg({ch}_bar_mutex);\n".format(
ch=channel
)
)
tracking += " {ch}_processed += quantile;\n".format(ch=channel)
tracking += ' Logger::get("main - {ch} Channel")->info("{{}} Events processed ...", {ch}_processed);\n'.format(
ch=channel
)
tracking += " });\n"
return template.replace("{PROGRESS_CALLBACK}", tracking) | 0cf720bd56a63939541a06e60492472f92c4e589 | 3,925 |
def solve(instance: Instance) -> InstanceSolution:
"""Solves the P||Cmax problem by using a genetic algorithm.
:param instance: valid problem instance
:return: generated solution of a given problem instance
"""
generations = 512
population_size = 128
best_specimens_number = 32
generator = solution_generator(instance, population_size, best_specimens_number)
best_solution = GeneticSolution(instance, [0 for _ in range(len(instance.tasks_durations))])
for _, solution in zip(range(generations), generator):
best_solution = min(best_solution, solution, key=lambda x: x.total_time)
return best_solution.to_instance_solution() | f8a82a066de29e0c149c3c5f01821af080619764 | 3,926 |
def payee_transaction():
"""Last transaction for the given payee."""
entry = g.ledger.attributes.payee_transaction(request.args.get("payee"))
return serialise(entry) | 47a21c7921cae4be30b6eefbbde43bfdf5a38013 | 3,927 |
def represent(element: Element) -> str:
"""Represent the regular expression as a string pattern."""
return _Representer().visit(element) | dfd44499aa1f63248c1a6632131974b242fedf95 | 3,928 |
def read_dynamo_table(gc, name, read_throughput=None, splits=None):
"""
Reads a Dynamo table as a Glue DynamicFrame.
:param awsglue.context.GlueContext gc: The GlueContext
:param str name: The name of the Dynamo table
:param str read_throughput: Optional read throughput - supports values from "0.1" to "1.5", inclusive.
:param str splits: Optional number of input splits - defaults to the SparkContext default parallelism.
:rtype: awsglue.dynamicframe.DynamicFrame
"""
connection_options = {
'dynamodb.input.tableName': name,
'dynamodb.splits': str(splits or gc.spark_session.sparkContext.defaultParallelism)
}
if read_throughput:
connection_options['dynamodb.throughput.read.percent'] = str(read_throughput)
return gc.create_dynamic_frame_from_options(connection_type='dynamodb', connection_options=connection_options) | 5f789626cb3fc8004532cc59bdae128b744b111e | 3,929 |
import six
def convert_to_bytes(text):
"""
Converts `text` to bytes (if it's not already).
Used when generating tfrecords. More specifically, in function call `tf.train.BytesList(value=[<bytes1>, <bytes2>, ...])`
"""
if six.PY2:
return convert_to_str(text) # In python2, str is byte
elif six.PY3:
if isinstance(text, bytes):
return text
else:
return convert_to_unicode(text).encode('utf-8')
else:
raise ValueError("Not running on Python2 or Python 3?") | da10be9cb88a80f66becead41400b3a4eb6152a2 | 3,930 |
from typing import OrderedDict
def xreplace_constrained(exprs, make, rule=None, costmodel=lambda e: True, repeat=False):
"""
Unlike ``xreplace``, which replaces all objects specified in a mapper,
this function replaces all objects satisfying two criteria: ::
* The "matching rule" -- a function returning True if a node within ``expr``
satisfies a given property, and as such should be replaced;
* A "cost model" -- a function triggering replacement only if a certain
cost (e.g., operation count) is exceeded. This function is optional.
Note that there is not necessarily a relationship between the set of nodes
for which the matching rule returns True and those nodes passing the cost
model check. It might happen for example that, given the expression ``a + b``,
all of ``a``, ``b``, and ``a + b`` satisfy the matching rule, but only
``a + b`` satisfies the cost model.
:param exprs: The target SymPy expression, or a collection of SymPy expressions.
:param make: Either a mapper M: K -> V, indicating how to replace an expression
in K with a symbol in V, or a function, used to construct new, unique
symbols. Such a function should take as input a parameter, used to
enumerate the new symbols.
:param rule: The matching rule (a lambda function). May be left unspecified if
``make`` is a mapper.
:param costmodel: The cost model (a lambda function, optional).
:param repeat: Repeatedly apply ``xreplace`` until no more replacements are
possible (optional, defaults to False).
"""
found = OrderedDict()
rebuilt = []
# Define /replace()/ based on the user-provided /make/
if isinstance(make, dict):
rule = rule if rule is not None else (lambda i: i in make)
replace = lambda i: make[i]
else:
assert callable(make) and callable(rule)
def replace(expr):
if isinstance(make, dict):
return make[expr]
temporary = found.get(expr)
if temporary:
return temporary
else:
temporary = make(replace.c)
found[expr] = temporary
replace.c += 1
return temporary
replace.c = 0 # Unique identifier for new temporaries
def run(expr):
if expr.is_Atom or expr.is_Indexed:
return expr, rule(expr)
elif expr.is_Pow:
base, flag = run(expr.base)
if flag and costmodel(base):
return expr.func(replace(base), expr.exp, evaluate=False), False
else:
return expr.func(base, expr.exp, evaluate=False), flag
else:
children = [run(a) for a in expr.args]
matching = [a for a, flag in children if flag]
other = [a for a, _ in children if a not in matching]
if matching:
matched = expr.func(*matching, evaluate=False)
if len(matching) == len(children) and rule(expr):
# Go look for longer expressions first
return matched, True
elif rule(matched) and costmodel(matched):
# Replace what I can replace, then give up
rebuilt = expr.func(*(other + [replace(matched)]), evaluate=False)
return rebuilt, False
else:
# Replace flagged children, then give up
replaced = [replace(e) for e in matching if costmodel(e)]
unreplaced = [e for e in matching if not costmodel(e)]
rebuilt = expr.func(*(other + replaced + unreplaced), evaluate=False)
return rebuilt, False
return expr.func(*other, evaluate=False), False
# Process the provided expressions
for expr in as_tuple(exprs):
assert expr.is_Equality
root = expr.rhs
while True:
ret, _ = run(root)
if repeat and ret != root:
root = ret
else:
rebuilt.append(expr.func(expr.lhs, ret))
break
# Post-process the output
found = [Eq(v, k) for k, v in found.items()]
return found + rebuilt, found | f24f0bb1356c5613c012fe405691b1b493ffc6a2 | 3,931 |
import re
def get_comp_rules() -> str:
"""
Download the comp rules from Wizards site and return it
:return: Comp rules text
"""
response = download_from_wizards(COMP_RULES)
# Get the comp rules from the website (as it changes often)
# Also split up the regex find so we only have the URL
comp_rules_url: str = re.findall(r"href=\".*\.txt\"", response)[0][6:-1]
response = download_from_wizards(comp_rules_url).replace("’", "'")
return response | dbb48b391305199182a2bf66bed62dcd91dc0071 | 3,932 |
def delete_vpc(vpc_id):
"""Delete a VPC."""
client = get_client("ec2")
params = {}
params["VpcId"] = vpc_id
return client.delete_vpc(**params) | 5c1a043d837ff1bc0cab41ccdbe784688966a275 | 3,933 |
def test_network_xor(alpha = 0.1, iterations = 1000):
"""Creates and trains a network against the XOR/XNOR data"""
n, W, B = network_random_gaussian([2, 2, 2])
X, Y = xor_data()
return n.iterate_network(X, Y, alpha, iterations) | cb05f01f589d7e224d1a0a87f594a075228741fc | 3,934 |
from pathlib import Path
import shutil
def assemble_book(draft__dir: Path, work_dir: Path, text_dir: Path) -> Path:
"""Merge contents of draft book skeleton with test-specific files for
the book contents.
"""
book_dir = work_dir / "test-book"
# Copy skeleton from draft__dir
shutil.copytree(draft__dir, book_dir)
# Add metadata and text files for test book
if (text_dir / "content.opf").is_file():
shutil.copy(text_dir / "content.opf", book_dir / "src" / "epub")
for file in text_dir.glob("*.xhtml"):
shutil.copy(file, book_dir / "src" / "epub" / "text")
# Rebuild file metadata
must_run(f"se print-manifest-and-spine --in-place {book_dir}")
must_run(f"se print-toc --in-place {book_dir}")
return book_dir | 51ec6ed21760feeff3eeee6ee6fa802383b5afa3 | 3,935 |
def merid_advec_spharm(arr, v, radius):
"""Meridional advection using spherical harmonics."""
_, d_dy = horiz_gradient_spharm(arr, radius)
return v * d_dy | 7973f99b60ad9d94b6858d28d8877f5c814160c2 | 3,936 |
import pandas as pd
def run_win_pct(team_name, df):
"""
Function that calculates a team's winning percentage Year over Year (YoY).
Calculation:
Number of wins divided by the total number of competitions,
then multiplied by 100 = win percentage.
Number of losses divided by the total number of competitions,
then multiplied by 100 = loss percentage.
This function also takes into account the home and away win/loss
percentages.
:param team_name: Takes in the state of the team_names dropdown
:param df: dataframe of match results with home/away teams and outcomes
:return: a dataframe of win/loss percentages for the given team
"""
df['home_team'] = df['home_team'].str.lower()
df['away_team'] = df['away_team'].str.lower()
team_name = team_name.lower()
df_home = df[df['home_team'] == team_name]
df_away = df[df['away_team'] == team_name]
frames = [df_home,df_away]
df_fill = pd.concat(frames)
df = home_vs_away(df_fill, team_name)
home_matches = df[df['home_team'] == team_name]
away_matches = df[df['away_team'] == team_name]
home_matches = home_matches.drop(columns = ['away_team'])
away_matches = away_matches.drop(columns = ['home_team'])
#wins per season
home_team_win = home_matches.groupby(["home_team","dateYear"])["outcome"].apply(
lambda x: x[x.str.contains("win")].count()).reset_index()
away_team_win = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('win')].count()).reset_index()
home_team_loss = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('lose')].count()).reset_index()
away_team_loss = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('lose')].count()).reset_index()
home_team_tie = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('draw')].count()).reset_index()
away_team_tie = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('draw')].count()).reset_index()
#matches played per season
searchFor = ['win','lose','draw']
matches_home = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
matches_away = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
#goals for and against
match_numbers = matches_home.merge(matches_away, how='left', left_on='dateYear', right_on='dateYear')
loss_merge = home_team_loss.merge(away_team_loss, how='left', left_on='dateYear', right_on='dateYear')
tie_merge = home_team_tie.merge(away_team_tie, how='left', left_on='dateYear', right_on='dateYear')
fin = home_team_win.merge(away_team_win, how = 'left', left_on='dateYear', right_on='dateYear')
fin['Total Wins'] = fin['outcome_x'] + fin['outcome_y']
fin['Total Losses'] = loss_merge['outcome_x'] + loss_merge['outcome_y']
fin['Total Draws'] = tie_merge['outcome_x'] + tie_merge['outcome_y']
fin['Total Matches'] = match_numbers['outcome_x'] + match_numbers['outcome_y']
fin['Win PCT'] = (fin['Total Wins'] / fin['Total Matches'] * 100).round(2)
fin['Loss PCT'] = (fin['Total Losses'] / fin['Total Matches'] * 100).round(2)
fin['Draw PCT'] = (fin['Total Draws'] / fin['Total Matches'] * 100).round(2)
#home match percentage
fin['Home Win PCT'] = (home_team_win['outcome'] / matches_home['outcome'] * 100).round(2)
fin['Away Win PCT'] = (away_team_win['outcome'] / matches_away['outcome'] * 100).round(2)
fin['Home Loss PCT'] = (home_team_loss['outcome'] / matches_home['outcome'] * 100).round(2)
fin['Away Loss PCT'] = (away_team_loss['outcome'] / matches_away['outcome'] * 100).round(2)
return fin | 3fc071cd7e89f68216286b0b6422a95ce8f690f6 | 3,937 |
def get_container_info(pi_status):
"""
Expects a dictionary data structure that include keys and values of the
parameters that describe the containers running in a Raspberry Pi computer.
Returns the input dictionary populated with values measured from the current
status of one or more containers running in the Pi.
"""
pi_status['containers'] = []
if len(client.containers()) == 0:
print 'No container running'
new_container={
'id': 'None',
'cpuUsage': '0.0',
'memUsage': '0.0',
'name': 'None', # the client.container() returns a list of names.
'status': 'None', # as a temporary solution, I take the first name
'image': 'None', # of the list.
'port_host': '0', # the client.container() returns a list of ports
'port_container': '0'} # getting the first, is a tmp solution
pi_status['containers'].append(new_container)
else:
print 'num container %d' % len(client.containers())
for container in client.containers():
cmd = "docker stats %s --no-stream | grep %s | awk \'{print $2}\' " % (container['Id'], container['Id'])
cpuUsage = system_call(cmd)
cpuUsage_str = cpuUsage.replace("\n", "")
cpuUsage_str = cpuUsage_str.replace("%", "")
cmd = "docker stats %s --no-stream | grep %s | awk \'{print $6}\' " % (container['Id'], container['Id'])
memUsage = system_call(cmd)
memUsage_str = memUsage.replace("\n", "")
memUsage_str = memUsage_str.replace("%", "")
#dict_port_host= container['Ports'][0]
#p_int=dict_port_host['PublicPort']
#port_host_str= str(p_int).replace("\n", "")
new_container={
'id': container['Id'],
'cpuUsage': cpuUsage_str,
'memUsage': memUsage_str,
'name': container['Names'][0], # the client.container() returns a list of names.
'status': container['Status'], # as a temporary solution, I take the first name
'image': container['Image'], # of the list.
'port_host': '80', # the client.container() returns a list of ports
'port_container': '8000'} # getting the first, is a tmp solution
pi_status['containers'].append(new_container)
return (len((pi_status['containers']))) | a488e7afa9c2e003edb3138c1d78e434921dbf3e | 3,938 |
import math
def formatSI(n: float) -> str:
"""Format the integer or float n to 3 significant digits + SI prefix."""
s = ''
if n < 0:
n = -n
s += '-'
if type(n) is int and n < 1000:
s = str(n) + ' '
elif n < 1e-22:
s = '0.00 '
else:
assert n < 9.99e26
log = int(math.floor(math.log10(n)))
i, j = divmod(log, 3)
for _try in range(2):
templ = '%.{}f'.format(2 - j)
val = templ % (n * 10 ** (-3 * i))
if val != '1000':
break
i += 1
j = 0
s += val + ' '
if i != 0:
s += 'yzafpnum kMGTPEZY'[i + 8]
return s | ddbbb70e66d368253d29c3223eee7a5926518efd | 3,939 |
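A few illustrative outputs of formatSI (3 significant digits plus an SI prefix, with a trailing space when no prefix applies):
assert formatSI(7) == '7 '
assert formatSI(12345) == '12.3 k'
assert formatSI(0.00047) == '470 u'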
import numpy as np
import scipy.stats
def pemp(stat, stat0):
""" Computes empirical values identically to bioconductor/qvalue empPvals """
assert len(stat0) > 0
assert len(stat) > 0
stat = np.array(stat)
stat0 = np.array(stat0)
m = len(stat)
m0 = len(stat0)
statc = np.concatenate((stat, stat0))
v = np.array([True] * m + [False] * m0)
perm = np.argsort(-statc, kind="mergesort") # reversed sort, mergesort is stable
v = v[perm]
u = np.where(v)[0]
p = (u - np.arange(m)) / float(m0)
# ranks can be fractional, we round down to the next integer, ranking returns values starting
# with 1, not 0:
ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
p = p[ranks]
p[p <= 1.0 / m0] = 1.0 / m0
return p | 7d046666687ede0b671c00d5c691ac520179e11f | 3,940 |
def help_message() -> str:
"""
Return help message.
Returns
-------
str
Help message.
"""
msg = f"""neocities-sync
Sync local directories with neocities.org sites.
Usage:
neocities-sync [options] [--dry-run] [-c CONFIG] [-s SITE1] [-s SITE2] ...
Options:
-C CONFIG_FILE Path to the config file to use.
(defaults to "{config_file_path_unexpanded}".)
-s SITE Which site to sync (as specified in the config file).
The default is to sync all sites in the config file.
--dry-run Do not actually upload anything.
-v Verbose output.
-q Quiet output.
-h, --help Show this help message and exit.
Config file:
The config file is an ini file, located at "{config_file_path_unexpanded}".
Each section of the config file describes a different site (the name of the
section doesn't need to be the same as the site's domain, since the api_key
suffices to identify the site).
The keys of the config file are:
api_key (str) [required]
The api key of the site.
root_dir (path) [required]
The local directory to sync.
sync_disallowed (yes/no) [default: no]
Whether to sync files that are only allowed for paying users.
sync_hidden (yes/no) [default: no]
Whether to sync hidden files.
sync_vcs (yes/no) [default: no]
Whether to sync version control files.
allowed_extensions (list of str) [default: not set]
Which file extensions to sync. If not set, all files are synced.
remove_empty_dirs (yes/no) [default: yes]
Whether to remove empty directories after sync.
Example config:
[site1]
api_key = 6b9b522e7d8d93e88c464aafc421a61b
root_dir = ~/path/to/site1
allowed_extensions = .html .css .js
remove_empty_dirs = no
[site2]
api_key = 78559e6ebc35fe33eec21de05666a243
root_dir = /var/www/path/to/site2
allowed_extensions = .html .css .js .woff2
.neocitiesignore
In any subdirectory of the root directory, a file named ".neocitiesignore"
can be used to specify which files to ignore. The syntax is the same as
the one for ".gitignore".
Credits:
This software was developed by Andre Kugland <[email protected]>."""
return msg | 8c2d0c31513e36c1ef1c9f0b096d264449dafdee | 3,941 |
def fuzzyCompareDouble(p1, p2):
"""
Compares two doubles for approximate (fuzzy) equality.
"""
return abs(p1 - p2) * 100000. <= min(abs(p1), abs(p2)) | e2a93a993147e8523da0717d08587250003f9269 | 3,942 |
def filter_date_df(date_time, df, var="date"):
"""Filtrar dataframe para uma dada lista de datas.
Parameters
----------
date_time: list
list with dates.
df: pandas.Dataframe
var: str
column to filter, default value is "date" but can be adaptable for other ones.
Returns
-------
df_filter: pandas.Dataframe
Examples
--------
>>> file1 = './data/WIN$N_1M_2015.08.12_2015.12.30_.csv',
>>> file2 = './data/WIN$N_10M_2013.11.08_2021.01.22_.csv'
>>> dates = filter_overlapping_dates(file1, file2)
>>> df1 = pandas.read_csv(file1)
>>> filter_date_df(dates_overlapping, df1).head()
date hour open high low close real_volume tick_volume
0 2015.08.12 09:00:00 50280 50430 50255 50405 976 217
1 2015.08.12 09:01:00 50405 50440 50335 50400 1589 445
2 2015.08.12 09:02:00 50395 50410 50355 50355 465 102
3 2015.08.12 09:03:00 50350 50360 50320 50325 474 150
4 2015.08.12 09:04:00 50325 50330 50090 50190 2078 747
"""
filters = [True if date in date_time else False for date in df[var]]
df_filter = df[filters]
df_filter = df_filter.drop(columns=["spread"], errors="ignore")
df_filter = df_filter.dropna().drop_duplicates()
df_filter = df_filter.sort_values(by=["date", "hour"])
df_filter = df_filter.reset_index(drop=True)
df_filter = format_hour(df_filter)
return df_filter | 6d3002917ef0786e8b128a2a02df3fabb9997aab | 3,943 |
import urllib
def pproxy_desired_access_log_line(url):
"""Return a desired pproxy log entry given a url."""
qe_url_parts = urllib.parse.urlparse(url)
protocol_port = '443' if qe_url_parts.scheme == 'https' else '80'
return 'http {}:{}'.format(qe_url_parts.hostname, protocol_port) | 4c056b1d2cc11a72cf63400734807b9b074f147c | 3,944 |
import socket
def unused_port() -> int:
"""Return a port that is unused on the current host."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
return s.getsockname()[1] | 26d72e1a529edd37b14ac746bcb4082c1d1b9061 | 3,945 |
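A brief usage note: the port is only guaranteed free at the moment of the call, so bind to it promptly (there is an inherent race between this helper and the server that later uses the port):
port = unused_port()
print(f"starting test server on 127.0.0.1:{port}")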
def get_axioma_risk_free_rate(conn) :
"""
Get the USD risk free rate provided by Axioma and converted it into
a daily risk free rate assuming a 252 trading data calendar.
"""
query = """
select
data_date,
Risk_Free_Rate
from
axioma_currency
where
currencycode = 'USD'
order by
data_date
"""
df = pd.read_sql_query(query, conn.sql.CONN)
df['Risk_Free_Rate'] = df['Risk_Free_Rate'].astype('float32')
df[RFR] = (1 + df['Risk_Free_Rate']) ** (1.0/252.0) - 1
df.drop(columns = ['Risk_Free_Rate'], inplace = True)
return df | 2c6c680ef36c247b67c481ff4dde685afc4bad4d | 3,946 |
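A quick numeric check of the annual-to-daily conversion used above (252 trading days per year):
annual = 0.05                              # 5% annual risk-free rate
daily = (1 + annual) ** (1.0 / 252.0) - 1  # ~0.000194, i.e. roughly 1.9 bps per trading day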
def update_user_count_estimated(set_of_contributors, anonymous_coward_comments_counter):
"""
Total user count estimate update in the presence of anonymous users.
Currently we use a very simplistic model for estimating the full user count.
Inputs: - set_of_contributors: A python set of user ids.
- anonymous_coward_comments_counter: The number of comments posted by anonymous user(s).
Output: estimated_anonymous_contributor_count: The estimated number of users active in the information cascade.
"""
eponymous_user_count = len(set_of_contributors)
if anonymous_coward_comments_counter > 0:
# TODO: Of course, I can use a much more sophisticated model.
estimated_anonymous_user_count = (1 + anonymous_coward_comments_counter)/2
else:
estimated_anonymous_user_count = 0.0
estimated_user_count = eponymous_user_count + estimated_anonymous_user_count
return estimated_user_count | 165160c8c0284743856c17aba90cffaa78f2ba11 | 3,947 |
import numbers
import time
import warnings
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
split_progress=None, candidate_progress=None,
error_score=np.nan):
"""override the sklearn.model_selection._validation._fit_and_score
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_failed : bool
The estimator failed to fit.
"""
if estimator.__class__.__name__ != 'KerasGBatchClassifier':
return _sk_fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params,
return_train_score=return_train_score,
return_parameters=return_parameters,
return_n_test_samples=return_n_test_samples,
return_times=return_times,
return_estimator=return_estimator,
split_progress=split_progress,
candidate_progress=candidate_progress,
error_score=error_score)
if not isinstance(error_score, numbers.Number) and error_score != 'raise':
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += (f"; {candidate_progress[0]+1}/"
f"{candidate_progress[1]}")
if verbose > 1:
if parameters is None:
params_msg = ''
else:
sorted_keys = sorted(parameters) # Ensure deterministic o/p
params_msg = (', '.join(f'{k}={parameters[k]}'
for k in sorted_keys))
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for k, v in parameters.items():
cloned_parameters[k] = clone(v, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exc()),
FitFailedWarning)
result["fit_failed"] = True
else:
result["fit_failed"] = False
fit_time = time.time() - start_time
test_scores = estimator.evaluate(X_test, y_test, scorer,
error_score)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = estimator.evaluate(
X_train, y_train, scorer, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += (f"(train={train_scores:.3f}, "
f"test={test_scores:.3f})")
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result | 6330fb95709e74471b72b58297b3ce3c7d483449 | 3,948 |
from typing import Dict
from typing import List
def prettify_eval(set_: str, accuracy: float, correct: int, avg_loss: float, n_instances: int,
stats: Dict[str, List[int]]):
"""Returns string with prettified classification results"""
table = 'problem_type accuracy\n'
for k in sorted(stats.keys()):
accuracy_ = stats[k][0]/stats[k][1]
accuracy_ = accuracy_*100
table += k
table += ' '
table += '{:.2f}%\n'.format(accuracy_)
return '\n' + set_ + ' set average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
avg_loss, correct, n_instances, accuracy) + table + '\n' | 5e5ba8ffa62668e245daa2ada9fc09747b5b6dd2 | 3,949 |
def GetRecentRevisions(repository, project=None, num_revisions=20):
"""Get Recent Revisions.
Args:
repository: models.Repository, the repository whose revisions we ought
to get.
project: models.Project, restrict the query to a given project.
num_revisions: int, maximum number of revisions to fetch.
Returns:
list of models.Revisions
"""
q = db.Query(models.Revision).filter('repository_name =', repository.name)
# TODO(nicksantos): filter by project once the revisions have projects.
# But talk to dbentley to make sure that we really want to do this.
# if project:
# q.filter('project =', project)
# TODO(dbentley): eventually, it would be great to use the partial
# order implied in the actual VCS.
q.order('-time')
q.order('-first_seen')
return list(q.fetch(num_revisions)) | da775b43e0c4cee77006a12b5c1536a328f8a210 | 3,950 |
def load_location(doc_name):
"""Load a location from db by name."""
doc_ref = get_db().collection("locations").document(doc_name)
doc = doc_ref.get()
if not doc.exists:
return None
else:
return doc.to_dict() | 900450ec3a1c033a9c11baed611170457660754f | 3,951 |
def plotMultiROC(y_true, # list of true labels
y_scores, # array of scores for each class of shape [n_samples, n_classes]
title = 'Multiclass ROC Plot',
n_points=100, # reinterpolates to have exactly N points
labels = None, # list of labels for each class
threshdot = None,
plot=True, # 1/0. If 0, returns plotly json object, but doesn't plot
):
"""
Makes a multiclass ROC plot. Can also be used for binary ROC plot
"""
y_true = np.array(y_true)
y_scores = np.array(y_scores)
if y_scores.ndim == 1: # convert to [n_samples, n_classes] even if 1 class
y_scores = np.atleast_2d(y_scores).T
N, n_classes = y_scores.shape
if n_classes == 1: # needed to avoid inverting when doing binary classification
y_scores *= -1
if threshdot is not None:
threshdot *= -1
# calc ROC curves & AUC
fpr = dict()
tpr = dict()
thresh = dict()
thresh_txt = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], thresh[i] = sk.metrics.roc_curve(y_true == i, y_scores[:, i])
roc_auc[i] = sk.metrics.auc(fpr[i], tpr[i])
if n_points is not None:
x = np.linspace(0, 1, n_points)
indxs = np.searchsorted(tpr[i], x)
tpr[i] = tpr[i][indxs]
fpr[i] = fpr[i][indxs]
thresh[i] = thresh[i][indxs]
thresh_txt[i] = ['T=%.4f' % t for t in thresh[i]]
if labels is None:
labels = ['C%d' % n for n in range(1, n_classes+1)]
labels = [str(x) for x in labels] # convert labels to str
# make traces
traces = []
[traces.append(go.Scatter(y=tpr[i], x=fpr[i], name=labels[i] + '. AUC= %.2f' % (roc_auc[i]), text=thresh_txt[i],
legendgroup=str(i), line={'width': 1}))
for i in range(n_classes)]
traces += [go.Scatter(y=[0, 1], x=[0, 1], name='Random classifier', line={'width': 1, 'dash': 'dot'})]
if threshdot is not None:
for i in range(n_classes):
c_indx = (np.abs(thresh[i]-threshdot)).argmin()
traces += [go.Scatter(x=[fpr[i][c_indx]]*2, y=[tpr[i][c_indx]]*2, mode='markers',
name='Threshold', legendgroup=str(i), showlegend=False)]
# make layout
layout = go.Layout(title=title,
xaxis={'title': 'FPR'},
yaxis={'title': 'TPR'},
legend=dict(x=1),
hovermode='closest',
)
fig = go.Figure(data=traces, layout=layout)
return plotOut(fig, plot) | a8ca19b92f7f3539d8550cf63121a46d36e59cbf | 3,952 |
def fasta_to_dict(fasta_file):
"""Consolidate deflines and sequences from FASTA as dictionary"""
deflines = []
sequences = []
sequence = ""
with open(fasta_file, "r") as file:
for line in file:
if line.startswith(">"):
deflines.append(line.rstrip().lstrip('>'))
if sequence:
sequences.append(sequence)
sequence = ""
else:
sequence += line.rstrip()
sequences.append(sequence)
fasta_dict = {}
for x, defline in enumerate(deflines):
fasta_dict[defline]=sequences[x]
return fasta_dict | e1740ad29672e5239d575df963e21a0bf5caee08 | 3,953 |
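A small round-trip sketch for fasta_to_dict above: write a two-record FASTA file to a temporary path and parse it back.
import os
import tempfile

fasta_text = ">seq1 sample\nACGT\nACGT\n>seq2\nGGCC\n"
with tempfile.NamedTemporaryFile("w", suffix=".fasta", delete=False) as tmp:
    tmp.write(fasta_text)
    path = tmp.name
print(fasta_to_dict(path))  # {'seq1 sample': 'ACGTACGT', 'seq2': 'GGCC'}
os.remove(path)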
def find_roots(graph):
"""
return nodes that have no predecessors, i.e. the root nodes of the graph
"""
return [n for n in graph.nodes() if len(list(graph.predecessors(n))) == 0] | 7dbf755d2b76f066370d149638433c6693e8e7b9 | 3,954 |
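A quick sketch for find_roots above using networkx (an assumption; any graph exposing nodes() and predecessors() works): only the node with no incoming edges is returned.
import networkx as nx

g = nx.DiGraph([("a", "b"), ("a", "c"), ("b", "d")])
print(find_roots(g))  # ['a']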
def _is_test_file(filesystem, dirname, filename):
"""Return true if the filename points to a test file."""
return (_has_supported_extension(filesystem, filename) and
not is_reference_html_file(filename)) | ba161818a6f2497e1122519945f255d56488f231 | 3,955 |
def kitchen_door_device() -> Service:
"""Build the kitchen door device."""
transitions: TransitionFunction = {
"unique": {
"open_door_kitchen": "unique",
"close_door_kitchen": "unique",
},
}
final_states = {"unique"}
initial_state = "unique"
return build_deterministic_service_from_transitions(transitions, initial_state, final_states) | 700a1d92087ac91f5311b4c55380f1a6f18860b4 | 3,956 |
import http
def sql_connection_delete(
request: http.HttpRequest,
pk: int
) -> http.JsonResponse:
"""AJAX processor for the delete SQL connection operation.
:param request: AJAX request
:param pk: primary key for the connection
:return: AJAX response to handle the form
"""
conn = models.SQLConnection.objects.filter(pk=pk).first()
if not conn:
# The view is not there. Redirect to workflow detail
return http.JsonResponse({'html_redirect': reverse('home')})
return services.delete(
request,
conn,
reverse('connection:sqlconn_delete', kwargs={'pk': conn.id})) | 754e7d7f15a0be843b89c89446a7d4f39bc1401f | 3,957 |
from sage.all import solve
import html
def simpson_integration(
title = text_control('<h2>Simpson integration</h2>'),
f = input_box(default = 'x*sin(x)+x+1', label='$f(x)=$'),
n = slider(2,100,2,6, label='# divisions'),
interval_input = selector(['from slider','from keyboard'], label='Integration interval', buttons=True),
interval_s = range_slider(-10,10,default=(0,10), label="slider: "),
interval_g = input_grid(1,2,default=[[0,10]], label="keyboard: "),
output_form = selector(['traditional','table','none'], label='Computations form', buttons=True)):
"""
Interact explaining the simpson method for definite integrals, based on work by
Lauri Ruotsalainen, 2010 (based on the application "Numerical integrals with various rules"
by Marshall Hampton and Nick Alexander)
INPUT:
- ``f`` -- function of variable x to integrate
- ``n`` -- number of divisions (mult. of 2)
- ``interval_input`` -- switches the input for interval between slider and keyboard
- ``interval_s`` -- slider for interval to integrate
- ``interval_g`` -- input grid for interval to integrate
- ``output_form`` -- the computation is formatted in a traditional form, in a table or missing
EXAMPLES:
Invoked in the notebook, the following command will produce
the fully formatted interactive mathlet. In the command line,
it will simply return the underlying HTML and Sage code which
creates the mathlet::
sage: interacts.calculus.simpson_integration()
<html>...</html>
"""
x = SR.var('x')
f = symbolic_expression(f).function(x)
if interval_input == 'from slider':
interval = interval_s
else:
interval = interval_g[0]
def parabola(a, b, c):
A, B, C = SR.var("A, B, C")
K = solve([A*a[0]**2+B*a[0]+C==a[1], A*b[0]**2+B*b[0]+C==b[1], A*c[0]**2+B*c[0]+C==c[1]], [A, B, C], solution_dict=True)[0]
f = K[A]*x**2+K[B]*x+K[C]
return f
xs = []; ys = []
dx = float(interval[1]-interval[0])/n
for i in range(n+1):
xs.append(interval[0] + i*dx)
ys.append(f(x=xs[-1]))
parabolas = Graphics()
lines = Graphics()
for i in range(0, n-1, 2):
p = parabola((xs[i],ys[i]),(xs[i+1],ys[i+1]),(xs[i+2],ys[i+2]))
parabolas += plot(p(x=x), (x, xs[i], xs[i+2]), color="red")
lines += line([(xs[i],ys[i]), (xs[i],0), (xs[i+2],0)],color="red")
lines += line([(xs[i+1],ys[i+1]), (xs[i+1],0)], linestyle="-.", color="red")
lines += line([(xs[-1],ys[-1]), (xs[-1],0)], color="red")
html(r'Function $f(x)=%s$'%latex(f(x)))
show(plot(f(x),x,interval[0],interval[1]) + parabolas + lines, xmin = interval[0], xmax = interval[1])
numeric_value = integral_numerical(f,interval[0],interval[1])[0]
approx = dx/3 *(ys[0] + sum([4*ys[i] for i in range(1,n,2)]) + sum([2*ys[i] for i in range(2,n,2)]) + ys[n])
html(r'Integral value to seven decimal places is: $\displaystyle\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} = %.6f$'%
(interval[0],interval[1],
N(numeric_value,digits=7)))
if output_form == 'traditional':
sum_formula_html = r"\frac{d}{3} \cdot \left[ f(x_0) + %s + f(x_{%s})\right]" % (
' + '.join([ r"%s \cdot f(x_{%s})" %(i%2*(-2)+4, i+1) for i in range(0,n-1)]),
n
)
sum_placement_html = r"\frac{%.2f}{3} \cdot \left[ f(%.2f) + %s + f(%.2f)\right]" % (
dx,
N(xs[0],digits=5),
' + '.join([ r"%s \cdot f(%.2f)" %(i%2*(-2)+4, N(xk, digits=5)) for i, xk in enumerate(xs[1:-1])]),
N(xs[n],digits=5)
)
sum_values_html = r"\frac{%.2f}{3} \cdot \left[ %s %s %s\right]" %(
dx,
"%.2f + "%N(ys[0],digits=5),
' + '.join([ r"%s \cdot %.2f" %(i%2*(-2)+4, N(yk, digits=5)) for i, yk in enumerate(ys[1:-1])]),
" + %.2f"%N(ys[n],digits=5)
)
html(r'''
<div class="math">
\begin{align*}
\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}
& \approx %s \\
& = %s \\
& = %s \\
& = %.6f
\end{align*}
</div>
''' % (
interval[0], interval[1],
sum_formula_html, sum_placement_html, sum_values_html,
N(approx,digits=7)
))
elif output_form == 'table':
s = [['$i$','$x_i$','$f(x_i)$','$m$','$m\cdot f(x_i)$']]
for i in range(0,n+1):
if i==0 or i==n:
j = 1
else:
j = (i+1)%2*(-2)+4
s.append([i, xs[i], ys[i],j,N(j*ys[i])])
s.append(['','','','$\sum$','$%s$'%latex(3/dx*approx)])
pretty_print(table(s, header_row=True))
html(r'$\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}\approx\frac {%.2f}{3}\cdot %s=%s$'%
(interval[0], interval[1],dx,latex(3/dx*approx),latex(approx))) | 45e575e9ebda475a613555dfcb43ae7d739131c9 | 3,958 |
def object_reactions_form_target(object):
"""
Get the target URL for the object reaction form.
Example::
<form action="{% object_reactions_form_target object %}" method="post">
"""
ctype = ContentType.objects.get_for_model(object)
return reverse("comments-ink-react-to-object", args=(ctype.id, object.id)) | 5bcd4d9fa8db783c78668820326dd55038ef609e | 3,959 |
def check_args(**kwargs):
"""
Check arguments for themis load function
Parameters:
**kwargs : a dictionary of arguments
Possible arguments are: probe, level
The arguments can be: a string or a list of strings
Invalid argument are ignored (e.g. probe = 'g', level='l0', etc.)
Invalid argument names are ignored (e.g. 'probes', 'lev', etc.)
Returns: list
Prepared arguments in the same order as the inputs
Examples:
res_probe = check_args(probe='a')
(res_probe, res_level) = check_args(probe='a b', level='l2')
(res_level, res_probe) = check_args(level='l1', probe=['a', 'b'])
# With incorrect argument probes:
res = check_args(probe='a', level='l2', probes='a b') : res = [['a'], ['l2']]
"""
valid_keys = {'probe', 'level'}
valid_probe = {'a', 'b', 'c', 'd', 'e'}
valid_level = {'l1', 'l2'}
# Return list of values from arg_list that are only included in valid_set
def valid_list(arg_list, valid_set):
valid_res = []
for arg in arg_list:
if arg in valid_set:
valid_res.append(arg)
return valid_res
# Return list
res = []
for key, values in kwargs.items():
if key.lower() not in valid_keys:
continue
# resulting list
arg_values = []
# convert string into list, or ignore the argument
if isinstance(values, str):
values = [values]
elif not isinstance(values, list):
continue
for value in values:
arg_values.extend(value.strip().lower().split())
# simple validation of the arguments
if key.lower() == 'probe':
arg_values = valid_list(arg_values, valid_probe)
if key.lower() == 'level':
arg_values = valid_list(arg_values, valid_level)
res.append(arg_values)
return res | 3e25dc43df0a80a9a16bcca0729ee0b170a9fb89 | 3,960 |
def make_theta_mask(aa):
""" Gives the theta of the bond originating each atom. """
mask = np.zeros(14)
# backbone
mask[0] = BB_BUILD_INFO["BONDANGS"]['ca-c-n'] # nitrogen
mask[1] = BB_BUILD_INFO["BONDANGS"]['c-n-ca'] # c_alpha
mask[2] = BB_BUILD_INFO["BONDANGS"]['n-ca-c'] # carbon
mask[3] = BB_BUILD_INFO["BONDANGS"]['ca-c-o'] # oxygen
# sidechain
for i, theta in enumerate(SC_BUILD_INFO[aa]['angles-vals']):
mask[4 + i] = theta
return mask | f33c1b46150ed16154c9a10c92f30cf9f60c2f51 | 3,961 |
def create_keypoint(n,*args):
"""
Parameters:
-----------
n : int
Keypoint number
*args: tuple, int, float
*args must be a tuple of (x,y,z) coordinates or x, y and z
coordinates as arguments.
::
# Example
kp1 = 1
kp2 = 2
create_keypoint(kp1,(0,0,0)) # x,y,z as tuple
create_keypoint(kp2,1,1,1) # x,y,z as arguments
"""
if len(args)==1 and isinstance(args[0],tuple):
x,y,z = args[0][0],args[0][1],args[0][2]
else:
x,y,z = args[0], args[1], args[2]
_kp = "K,%g,%g,%g,%g"%(n,x,y,z)
return _kp | e498e36418ec19d2feef122d3c42a346f9de4af7 | 3,962 |
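The helper above only formats an APDL command string; a short check of both calling styles:
print(create_keypoint(1, (0, 0, 0)))  # K,1,0,0,0
print(create_keypoint(2, 1.5, 2, 3))  # K,2,1.5,2,3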
import time
def wait_for_sidekiq(gl):
"""
Return a helper function to wait until there are no busy sidekiq processes.
Use this with asserts for slow tasks (group/project/user creation/deletion).
"""
def _wait(timeout=30, step=0.5):
for _ in range(timeout):
time.sleep(step)
busy = False
processes = gl.sidekiq.process_metrics()["processes"]
for process in processes:
if process["busy"]:
busy = True
if not busy:
return True
return False
return _wait | 7fe98f13e9474739bfe4066f20e5f7d813ee4476 | 3,963 |
import os
import shutil
import subprocess
def ldd(file):
"""
Given a file return all the libraries referenced by the file
@type file: string
@param file: Full path to the file
@return: List containing linked libraries required by the file
@rtype: list
"""
rlist = []
if os.path.exists(file) and shutil.which("ldd") is not None:
process = subprocess.Popen(["ldd", file], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in process.stdout.readlines():
tokens = line.split(b"=>")
if len(tokens) == 2:
lib_loc = ((tokens[1].strip()).split(b" "))[0].strip()
if os.path.exists(lib_loc):
rlist.append(os.path.abspath(lib_loc).decode("utf-8"))
return rlist | d893fd9dc61a7b0c1f35c19b3f300b9f1b333eb2 | 3,964 |
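An example for ldd above, assuming a Linux host with the ldd utility installed; elsewhere the helper simply returns an empty list.
for lib in ldd("/bin/ls"):
    print(lib)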
def insert_node_after(new_node, insert_after):
"""Insert new_node into buffer after insert_after."""
next_element = insert_after['next']
next_element['prev'] = new_node
new_node['next'] = insert_after['next']
insert_after['next'] = new_node
new_node['prev'] = insert_after
return new_node | e03fbd7bd44a3d85d36069d494464b9237bdd306 | 3,965 |
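A sketch of the dict-based doubly linked list that insert_node_after above expects; each node is a plain dict with 'prev' and 'next' pointers.
head = {'value': 1, 'prev': None, 'next': None}
tail = {'value': 3, 'prev': head, 'next': None}
head['next'] = tail

middle = {'value': 2, 'prev': None, 'next': None}
insert_node_after(middle, head)
print(head['next']['value'], tail['prev']['value'])  # 2 2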
def apply_wavelet_decomposition(mat, wavelet_name, level=None):
"""
Apply 2D wavelet decomposition.
Parameters
----------
mat : array_like
2D array.
wavelet_name : str
Name of a wavelet. E.g. "db5"
level : int, optional
Decomposition level. It is constrained to return an array with
a minimum size larger than 16 pixels.
Returns
-------
list
The first element is an 2D-array, next elements are tuples of three
2D-arrays. i.e [mat_n, (cH_level_n, cV_level_n, cD_level_n), ...,
(cH_level_1, cV_level_1, cD_level_1)]
"""
(nrow, ncol) = mat.shape
max_level = int(
min(np.floor(np.log2(nrow / 16.0)), np.floor(np.log2(ncol / 16.0))))
if (level is None) or (level > max_level) or (level < 1):
level = max_level
return pywt.wavedec2(mat, wavelet_name, level=level) | d91f534d605d03c364c89383629a7142f4705ac8 | 3,966 |
import math
def ACE(img, ratio=4, radius=300):
"""The implementation of ACE"""
global para
para_mat = para.get(radius)
if para_mat is not None:
pass
else:
size = radius * 2 + 1
para_mat = np.zeros((size, size))
for h in range(-radius, radius + 1):
for w in range(-radius, radius + 1):
if not h and not w:
continue
para_mat[radius + h, radius + w] = 1.0 / \
math.sqrt(h ** 2 + w ** 2)
para_mat /= para_mat.sum()
para[radius] = para_mat
h, w = img.shape[:2]
p_h, p_w = [0] * radius + list(range(h)) + [h - 1] * radius, \
[0] * radius + list(range(w)) + [w - 1] * radius
temp = img[np.ix_(p_h, p_w)]
res = np.zeros(img.shape)
for i in range(radius * 2 + 1):
for j in range(radius * 2 + 1):
if para_mat[i][j] == 0:
continue
res += (para_mat[i][j] *
np.clip((img - temp[i:i + h, j:j + w]) * ratio, -1, 1))
return res | 6809067ec1aed0f20d62d672fcfb554e0ab51f28 | 3,967 |
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name | af4e05b0adaa9c90bb9946edf1dba67a40e78323 | 3,968 |
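A quick check of classname above: the module qualifier is only added when the class lives outside the given module.
from fractions import Fraction

print(classname(Fraction, 'pydoc'))      # fractions.Fraction
print(classname(Fraction, 'fractions'))  # Fraction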
import time
def demc_block(y, pars, pmin, pmax, stepsize, numit, sigma, numparams, cummodels, functype, myfuncs, funcx, iortholist, fits, gamma=None, isGR=True, ncpu=1):
"""
This function uses a differential evolution Markov chain with block updating to assess uncertainties.
PARAMETERS
----------
y: Array containing dependent data
Params: Array of initial guess for parameters
#Pmin: Array of parameter minimum values
#Pmax: Array of parameter maximum values
stepsize: Array of 1-sigma change in parameter per iteration
Numit: Number of iterations to perform
Sigma: Standard deviation of data noise in y
Numparams: Number of parameters for each model
Cummodels: Cumulative number of models used
Functype: Define function type (eclipse, ramp, ip, etc), see models.py
Myfuncs: Pointers to model functions
Funcx: Array of x-axis values for myfuncs
fit: List of fit objects
gamma: Multiplication factor in parameter differential; establishes the acceptance rate
OUTPUTS
-------
This function returns an array of the best fitting parameters,
an array of all parameters over all iterations, and numaccept.
REFERENCES
----------
Cajo J. F. Ter Braak, "Genetic algorithms and Markov Chain Monte Carlo: Differential Evolution Markov Chain makes Bayesian computing easy," Biometrics, 2006.
HISTORY
-------
Adapted from mcmc.py
Kevin Stevenson, UChicago August 2012
"""
global fit
fit = fits
params = np.copy(pars)
nchains, nump = params.shape
nextp = np.copy(params) #Proposed parameters
bestp = np.copy(params[0]) #Best-fit parameters
pedit = np.copy(params) #Editable parameters
numaccept = 0
allparams = np.zeros((nump, nchains, numit))
inotfixed = np.where(stepsize != 0)[0]
ishare = np.where(stepsize < 0)[0]
#ifree = np.where(stepsize > 0)[0]
outside = np.zeros((nchains, nump))
numevents = len(fit)
intsteps = np.min((numit/5,1e5))
isrednoise = False
wavelet = None
noisefunc = None
#UPDATE PARAMETER(S) EQUAL TO OTHER PARAMETER(S)
if (ishare.size > 0):
for s in range(ishare.size):
params[:,ishare[s]] = params[:,int(abs(stepsize[ishare[s]])-1)]
#Define blocks
blocks = []
for j in range(numevents):
#Build list of blocks
blocks = np.concatenate((blocks, fit[j].blocks))
for i in range(cummodels[j],cummodels[j+1]):
if functype[i] == 'noise':
# Set up for modified chi-squared calculation using correlated noise
isrednoise = True
wavelet = fit[j].etc[k]
noisefunc = myfuncs[i]
blocks = blocks.astype(int)
iblocks = []
eps = []
numblocks = blocks.max() + 1
numbp = np.zeros(numblocks)
ifree = [[] for i in range(numblocks)]
for b in range(numblocks):
#Map block indices
whereb = np.where(blocks == b)[0]
iblocks.append(whereb)
#Locate indices of free parameters in each block
for w in whereb:
ifree[b] = np.concatenate((ifree[b],numparams[w]+np.where(stepsize[numparams[w]:numparams[w+1]] > 0)[0])).astype(int)
#Calculate number of free parameters per block
numbp[b] += len(ifree[b])
eps.append(npr.normal(0, stepsize[ifree[b]]/100., [numit,numbp[b]]))
print("Number of free parameters per block:")
print(numbp)
numa = np.zeros(numblocks)
if gamma == None:
gamma = 2.38/np.sqrt(2.*numbp)
print("gamma:")
print(gamma)
#Calc chi-squared for model type using current params
currchisq = np.zeros(nchains)
currmodel = [[] for i in range(numevents)]
for j in range(numevents):
currmodel[j], noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j],
funcx, cummodels, numparams, j)
currchisq += calcChisq(y[j], sigma[j], currmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc)
bestchisq = currchisq[0]
#GENERATE RANDOM NUMBERS FOR MCMC
numnotfixed = len(inotfixed)
unif = npr.rand(numit,nchains)
randchains = npr.randint(0,nchains,[numit,nchains,2])
#START TIMER
clock = timer.Timer(numit,progress = np.arange(0.05,1.01,0.05))
#Run Differential Evolution Monte Carlo algorithm 'numit' times
for m in range(numit):
#Select next event (block) to update
b = m % numblocks
#Remove model component(s) that are taking a step
pedit = np.copy(params)
nextmodel = currmodel[:]
for j in range(numevents):
ymodels, noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j],
funcx, cummodels, numparams, j, iblocks[b])
nextmodel[j] = np.divide(currmodel[j],ymodels)
#Generate next step using differential evolution
for n in range(nchains):
rand1, rand2 = randchains[m,n]
while rand1 == n or rand2 == n or rand1 == rand2:
rand1, rand2 = npr.randint(0,nchains,2)
nextp[n,ifree[b]] = params[n,ifree[b]] + gamma[b]*(params[rand1,ifree[b]]-params[rand2,ifree[b]]) + eps[b][m]
#CHECK FOR NEW STEPS OUTSIDE BOUNDARIES
ioutside = np.where(np.bitwise_or(nextp[n] < pmin, nextp[n] > pmax))[0]
if (len(ioutside) > 0):
nextp[n,ioutside] = np.copy(params[n,ioutside])
outside[n,ioutside] += 1
#UPDATE PARAMETER(S) EQUAL TO OTHER PARAMETER(S)
if (ishare.size > 0):
for s in range(ishare.size):
nextp[:,ishare[s]] = nextp[:,int(abs(stepsize[ishare[s]])-1)]
#COMPUTE NEXT CHI SQUARED AND ACCEPTANCE VALUES
pedit = np.copy(nextp)
nextchisq = np.zeros(nchains)
for j in range(numevents):
ymodels, noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j, iblocks[b])
nextmodel[j] = np.multiply(nextmodel[j],ymodels)
nextchisq += calcChisq(y[j], sigma[j], nextmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc)
#CALCULATE ACCEPTANCE PROBABILITY
accept = np.exp(0.5 * (currchisq - nextchisq))
#print(b,currchisq[0], nextchisq[0], accept[0])
for n in range(nchains):
if accept[n] >= 1:
#ACCEPT BETTER STEP
numaccept += 1
numa[b] += 1
params[n] = np.copy(nextp[n])
currchisq[n] = np.copy(nextchisq[n])
if (currchisq[n] < bestchisq):
bestp = np.copy(params[n])
bestchisq = np.copy(currchisq[n])
elif unif[m,n] <= accept[n]:
#ACCEPT WORSE STEP
numaccept += 1
numa[b] += 1
params[n] = np.copy(nextp[n])
currchisq[n] = np.copy(nextchisq[n])
allparams[:,:,m] = params.T
#PRINT INTERMEDIATE INFO
if ((m+1) % intsteps == 0) and (m > 0):
print("\n" + time.ctime())
#print("Number of times parameter tries to step outside its prior:")
#print(outside)
print("Current Best Parameters: ")
print(bestp)
#Apply Gelman-Rubin statistic
if isGR:
#Check for no accepted steps in each chain
#stdev = np.std(allparams[inotfixed],axis=1)
#ichain = np.where(stdev > 0.)[0]
#Call test
#psrf, meanpsrf = gr.convergetest(allparams[inotfixed,ichain,:m+1], len(ichain))
psrf, meanpsrf = gr.convergetest(allparams[inotfixed,:,:m+1], nchains)
numconv = np.sum(np.bitwise_and(psrf < 1.01, psrf >= 1.00))
print("Gelman-Rubin statistic for free parameters:")
print(psrf)
if numconv == numnotfixed: #and m >= 1e4:
print("All parameters have converged to within 1% of unity. Halting MCMC.")
allparams = allparams[:,:,:m+1]
break
clock.check(m+1)
print("Acceptance rate per block (%):")
print(100.*numa*numblocks/numit/nchains)
allparams = np.reshape(allparams,(nump, (m+1)*nchains))
return allparams, bestp, numaccept, (m+1)*nchains | 414168976c732d66165e19c356800158b2056a1e | 3,969 |
def shape5d(a, data_format="NDHWC"):
"""
Ensure a 5D shape, to use with 5D symbolic functions.
Args:
a: a int or tuple/list of length 3
Returns:
list: of length 5. if ``a`` is a int, return ``[1, a, a, a, 1]``
or ``[1, 1, a, a, a]`` depending on data_format "NDHWC" or "NCDHW".
"""
s2d = shape3d(a)
if data_format == "NDHWC":
return [1] + s2d + [1]
else:
return [1, 1] + s2d | fe6d974791a219c45a543a4d853f5d44770d0c9a | 3,970 |
def _compute_node_to_inventory_dict(compute_node):
"""Given a supplied `objects.ComputeNode` object, return a dict, keyed
by resource class, of various inventory information.
:param compute_node: `objects.ComputeNode` object to translate
"""
result = {}
# NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus,
# memory_mb and disk_gb if the Ironic node is not available/operable
# WRS: allow max_unit to be number of vcpus * allocation ratio to allow
# for instances with dedicated cpu_policy to allocate correctly. Given
# change to max unit have to set allocation ratio in resource inventory
# to 1 so capacity check is correct.
if compute_node.vcpus > 0:
result[VCPU] = {
'total': int(compute_node.vcpus *
compute_node.cpu_allocation_ratio),
'reserved': CONF.reserved_host_cpus,
'min_unit': 1,
'max_unit': int(compute_node.vcpus *
compute_node.cpu_allocation_ratio),
'step_size': 1,
'allocation_ratio': 1,
}
if compute_node.memory_mb > 0:
result[MEMORY_MB] = {
'total': compute_node.memory_mb,
'reserved': CONF.reserved_host_memory_mb,
'min_unit': 1,
'max_unit': compute_node.memory_mb,
'step_size': 1,
'allocation_ratio': compute_node.ram_allocation_ratio,
}
if compute_node.local_gb > 0:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
result[DISK_GB] = {
'total': compute_node.local_gb,
'reserved': reserved_disk_gb,
'min_unit': 1,
'max_unit': compute_node.local_gb,
'step_size': 1,
'allocation_ratio': compute_node.disk_allocation_ratio,
}
return result | 385170dd5021da202364d03c66a0b6d268580945 | 3,971 |
def resnet152(pretrained=False, num_classes=1000, ifmask=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
block = Bottleneck
model = ResNet(block, [3, 8, 36, 3], num_classes=1000, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
model.fc = nn.Linear(512 * block.expansion, num_classes)
if ifmask:
model.lmask = LearnableMaskLayer(feature_dim=512* block.expansion, num_classes=num_classes)
return model | 8b72a8e284d098a089448e4a10d5f393345d7278 | 3,972 |
def register_driver(cls):
"""
Registers a driver class
Args:
cls (object): Driver class.
Returns:
name: driver name
"""
_discover_on_demand()
if not issubclass(cls, BaseDriver):
raise QiskitChemistryError('Could not register class {} is not subclass of BaseDriver'.format(cls))
return _register_driver(cls) | 82eca23a5cf5caf9a028d040ac523aa6e20ae01d | 3,973 |
def ceil(array, value):
"""
Returns the smallest index i such that array[i - 1] < value.
"""
l = 0
r = len(array) - 1
i = r + 1
while l <= r:
m = l + int((r - l) / 2)
if array[m] >= value:
# This mid index is a candidate for the index we are searching for
# so save it, and continue searching for a smaller candidate on the
# left side.
i = m
r = m - 1
else:
# This mid index is not a candidate so continue searching the right
# side.
l = m + 1
return i | 689148cebc61ee60c99464fde10e6005b5d901a9 | 3,974 |
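A short check of ceil above on a sorted list; the result is the first index whose value is >= the query, or len(array) when every element is smaller.
print(ceil([1, 3, 5, 7], 4))  # 2 (array[2] == 5 is the first value >= 4)
print(ceil([1, 3, 5, 7], 9))  # 4 (no element >= 9)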
import copy
def FindOrgByUnionEtIntersection(Orgs):
"""Given a set of organizations considers all the possible unions and intersections to find all the possible organizations"""
NewNewOrgs=set([])
KnownOrgs=copy.deepcopy(Orgs)
for h in combinations(Orgs,2):
#checks only if one is not contained in the other
NewNewOrgs|=frozenset([OrgLibrary.check(h[0]|h[1])])
#checks only if one is not contained in the other
NewNewOrgs|=frozenset([OrgLibrary.check(h[0]&h[1])])
FoundOrgs=NewNewOrgs
NewOrgs=NewNewOrgs-KnownOrgs
while NewOrgs:
NewNewOrgs=set([])
for h in combinations(NewOrgs,2):
#checks only if one is not contained in the other
NewNewOrgs|=frozenset([OrgLibrary.check(h[0]|h[1])])
#checks only if one is not contained in the other
NewNewOrgs|=frozenset([OrgLibrary.check(h[0]&h[1])])
for h in NewOrgs:
for t in KnownOrgs:
#checks only if one is not contained in the other
NewNewOrgs|=frozenset([OrgLibrary.check(h|t)])
#checks only if one is not contained in the other
NewNewOrgs|=frozenset([OrgLibrary.check(h&t)])
KnownOrgs|=NewOrgs
NewOrgs=NewNewOrgs-KnownOrgs#NewOrgs is what we actually found
KnownOrgs-=Orgs
return KnownOrgs | 6e2450f49522186094b205dd86d8e698aca708bc | 3,975 |
def get_sf_fa(
constraint_scale: float = 1
) -> pyrosetta.rosetta.core.scoring.ScoreFunction:
"""
Get score function for full-atom minimization and scoring
"""
sf = pyrosetta.create_score_function('ref2015')
sf.set_weight(
pyrosetta.rosetta.core.scoring.ScoreType.atom_pair_constraint,
5.0 * constraint_scale)
sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.dihedral_constraint,
1.0 * constraint_scale)
sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.angle_constraint,
1.0 * constraint_scale)
return sf | b82b352f3fc031cc18951b037779a71247e5095f | 3,976 |
from typing import Optional
from typing import List
def make_keypoint(class_name: str, x: float, y: float, subs: Optional[List[SubAnnotation]] = None) -> Annotation:
"""
Creates and returns a keypoint, aka point, annotation.
Parameters
----------
class_name : str
The name of the class for this ``Annotation``.
x : float
The ``x`` value of the point.
y : float
The ``y`` value of the point.
subs : Optional[List[SubAnnotation]], default: None
List of ``SubAnnotation``s for this ``Annotation``.
Returns
-------
Annotation
A point ``Annotation``.
"""
return Annotation(AnnotationClass(class_name, "keypoint"), {"x": x, "y": y}, subs or []) | bc4a96c8376890eaaa2170ab1cc1401dcb2781a4 | 3,977 |
def plane_mean(window):
"""Plane mean kernel to use with convolution process on image
Args:
window: the window part to use from image
Returns:
Normalized residual error from mean plane
Example:
>>> from ipfml.filters.kernels import plane_mean
>>> import numpy as np
>>> window = np.arange(9).reshape([3, 3])
>>> result = plane_mean(window)
>>> (result < 0.0001)
True
"""
window = np.array(window)
width, height = window.shape
# prepare data
nb_elem = width * height
xs = [int(i / height) for i in range(nb_elem)]
ys = [i % height for i in range(nb_elem)]
zs = np.array(window).flatten().tolist()
# get residual (error) from mean plane computed
tmp_A = []
tmp_b = []
for i in range(len(xs)):
tmp_A.append([xs[i], ys[i], 1])
tmp_b.append(zs[i])
b = np.matrix(tmp_b).T
A = np.matrix(tmp_A)
fit = (A.T * A).I * A.T * b
errors = b - A * fit
residual = np.linalg.norm(errors)
return residual | 7383078ec3c88ac52728cddca9a725f6211b2d2c | 3,978 |
def _eval_field_amplitudes(lat, k=5, n=1, amp=1e-5, field='v',
wave_type='Rossby', parameters=Earth):
"""
Evaluates the latitude dependent amplitudes at a given latitude point.
Parameters
----------
lat : Float, array_like or scalar
latitude(radians)
k : Integer, scalar
spherical wave-number (dimensionless)
Default : 5
n : Integer, scaler
wave-mode (dimensionless)
Default : 1
amp : Float, scalar
wave amplitude(m/sec)
Default : 1e-5
field : str
pick 'phi' for geopotential height,
'u' for zonal velocity and 'v' for meridional velocity
Default : 'v'
wave_type: str
choose Rossby waves or WIG waves or EIG waves.
Default: Rossby
parameters: dict
planetary parameters dict with keys:
angular_frequency: float, (rad/sec)
gravitational_acceleration: float, (m/sec^2)
mean_radius: float, (m)
layer_mean_depth: float, (m)
Default: Earth's parameters defined above
Returns
-------
Either u_hat(m/sec), v_hat(m/sec) or p_hat(m^2/sec^2) : Float, array_like
or scalar Evaluation of the amplitudes for the zonal velocity,
or meridional velocity or the geopotential height respectivly.
Notes
-----
This function supports k>=1 and n>=1 inputs only.
Special treatments are required for k=0 and n=-1,0/-.
"""
if not isinstance(wave_type, str):
raise TypeError(str(wave_type) + ' should be string...')
# unpack dictionary into vars:
OMEGA = _unpack_parameters(parameters, 'angular_frequency')
G = _unpack_parameters(parameters, 'gravitational_acceleration')
A = _unpack_parameters(parameters, 'mean_radius')
H0 = _unpack_parameters(parameters, 'layer_mean_depth')
# Lamb's parameter:
Lamb = (2. * OMEGA * A)**2 / (G * H0)
# evaluate wave frequency:
all_omegas = _eval_omega(k, n, parameters)
# check for validity of wave_type:
if wave_type not in all_omegas:
raise KeyError(wave_type + ' should be Rossby, EIG or WIG...')
omega = all_omegas[wave_type]
# evaluate the meridional velocity amp first:
v_hat = _eval_meridional_velocity(lat, Lamb, n, amp)
# evaluate functions for u and phi:
v_hat_plus_1 = _eval_meridional_velocity(lat, Lamb, n + 1, amp)
v_hat_minus_1 = _eval_meridional_velocity(lat, Lamb, n - 1, amp)
# Eq. (6a) in the text
if field == 'v':
return v_hat
# Eq. (6b) in the text
elif field == 'u':
u_hat = (- ((n + 1) / 2.0)**0.5 * (omega / (G * H0)**0.5 + k / A) *
v_hat_plus_1 - ((n) / 2.0)**0.5 * (omega / (G * H0)**0.5 -
k / A) * v_hat_minus_1)
# pre-factors
u_hat = G * H0 * Lamb**0.25 / \
(1j * A * (omega**2 - G * H0 * (k / A)**2)) * u_hat
return u_hat
# Eq. (6c) in the text
elif field == 'phi':
p_hat = (- ((n + 1) / 2.0)**0.5 * (omega + (G * H0)**0.5 * k / A) *
v_hat_plus_1 + ((n) / 2.0)**0.5 * (omega - (G * H0)**0.5 *
k / A) * v_hat_minus_1)
p_hat = G * H0 * Lamb**0.25 / \
(1j * A * (omega**2 - G * H0 * (k / A)**2)) * p_hat
return p_hat
else:
raise KeyError('field must be u, v or phi') | db74c50ef6328055ab2a59faecba72cc28afd136 | 3,979 |
def get_uframe_info():
""" Get uframe configuration information. (uframe_url, uframe timeout_connect and timeout_read.) """
uframe_url = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_URL_BASE']
timeout = current_app.config['UFRAME_TIMEOUT_CONNECT']
timeout_read = current_app.config['UFRAME_TIMEOUT_READ']
return uframe_url, timeout, timeout_read | 921f42d59af265152d7ce453a19cb8057af8415e | 3,980 |
def yd_process_results(
mentions_dataset,
predictions,
processed,
sentence2ner,
include_offset=False,
mode='default',
rank_pred_score=True,
):
"""
Function that can be used to process the End-to-End results.
:return: dictionary with results and document as key.
"""
assert mode in ['best_candidate', 'remove_invalid', 'default']
res = {}
for doc in mentions_dataset:
if doc not in predictions:
# No mentions found, we return empty list.
continue
pred_doc = predictions[doc]
ment_doc = mentions_dataset[doc]
text = processed[doc][0]
res_doc = []
for pred, ment in zip(pred_doc, ment_doc):
sent = ment["sentence"]
idx = ment["sent_idx"]
start_pos = ment["pos"]
mention_length = int(ment["end_pos"] - ment["pos"])
if pred["prediction"] != "NIL":
candidates = [
{
'cand_rank': cand_rank,
'cand_name': cand_name,
'cand_score': cand_score,
}
for cand_rank, (cand_name, cand_mask, cand_score)
in enumerate(zip(pred['candidates'], pred['masks'], pred['scores']))
if float(cand_mask) == 1
]
if rank_pred_score:
candidates = sorted(candidates, key=lambda x: float(x['cand_score']), reverse=True)
# make sure that ed_model predict is always in the first place.
for cand_index, candidate in enumerate(candidates):
if candidate['cand_name'] == pred['prediction']:
if cand_index != 0:
candidates[0], candidates[cand_index] = candidates[cand_index], candidates[0]
break
if len(candidates) == 1:
temp = (
start_pos,
mention_length,
pred["prediction"],
ment["ngram"],
pred["conf_ed"],
ment["conf_md"] if "conf_md" in ment else 0.0,
ment["tag"] if "tag" in ment else "NULL",
[tmp_candidate['cand_name'] for tmp_candidate in candidates],
)
res_doc.append(temp)
else:
if mode == 'best_candidate':
for cand_index, candidate in enumerate(candidates):
tmp_cand_name = candidate['cand_name'].replace('_', ' ')
if sentence2ner is not None and \
tmp_cand_name in sentence2ner and \
ment["tag"] != sentence2ner[tmp_cand_name]:
continue
else:
temp = (
start_pos,
mention_length,
candidate['cand_name'],
ment["ngram"],
pred["conf_ed"],
ment["conf_md"] if "conf_md" in ment else 0.0,
ment["tag"] if "tag" in ment else "NULL",
[tmp_candidate['cand_name'] for tmp_candidate in candidates],
)
res_doc.append(temp)
break
elif mode == 'remove_invalid':
tmp_cand_name = pred["prediction"].replace('_', '')
if sentence2ner is not None and \
tmp_cand_name in sentence2ner and \
ment["tag"] != sentence2ner[tmp_cand_name]:
pass
else:
temp = (
start_pos,
mention_length,
pred["prediction"],
ment["ngram"],
pred["conf_ed"],
ment["conf_md"] if "conf_md" in ment else 0.0,
ment["tag"] if "tag" in ment else "NULL",
[tmp_candidate['cand_name'] for tmp_candidate in candidates],
)
res_doc.append(temp)
elif mode == 'default':
temp = (
start_pos,
mention_length,
pred["prediction"],
ment["ngram"],
pred["conf_ed"],
ment["conf_md"] if "conf_md" in ment else 0.0,
ment["tag"] if "tag" in ment else "NULL",
[tmp_candidate['cand_name'] for tmp_candidate in candidates],
)
res_doc.append(temp)
res[doc] = res_doc
return res | 32352c6aabea6750a6eb410d62232c96ad6b7e7d | 3,981 |
import re
def valid(f):
"""Formula f is valid if and only if it has no
numbers with leading zero, and evals true."""
try:
return not re.search(r'\b0[0-9]', f) and eval(f) is True
except ArithmeticError:
return False | 1303729dc53288ea157687f78d7266fa7cb2ce79 | 3,982 |
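A few illustrative calls for valid above; the regex guard rejects leading-zero numbers before eval ever runs, and division by zero is swallowed by the ArithmeticError handler.
print(valid('2 + 2 == 4'))   # True
print(valid('1 + 07 == 8'))  # False (leading zero)
print(valid('1 / 0 == 1'))   # False (ArithmeticError is caught)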
def user_info():
"""
渲染个人中心页面
:return:
"""
user = g.user
if not user:
return redirect('/')
data={
"user_info":user.to_dict()
}
return render_template("news/user.html",data=data) | 54c6c6122f28553f0550a744d5b51c26221f7c60 | 3,983 |
def _check_X(X, n_components=None, n_features=None, ensure_min_samples=1):
"""Check the input data X.
See https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_base.py .
Parameters
----------
X : array-like, shape (n_samples, n_features)
n_components : integer
Returns
-------
X : array, shape (n_samples, n_features)
"""
X = check_array(X, dtype=[np.float64, np.float32],
ensure_min_samples=ensure_min_samples)
if n_components is not None and X.shape[0] < n_components:
raise ValueError('Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X.shape[0]))
if n_features is not None and X.shape[1] != n_features:
raise ValueError("Expected the input data X have %d features, "
"but got %d features"
% (n_features, X.shape[1]))
return X | 429120092a963d1638e04cc96afdfe5979470fee | 3,984 |
def read_viirs_geo (filelist, ephemeris=False, hgt=False):
"""
Read JPSS VIIRS Geo files and return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle.
if ephemeris=True, then return midTime, satellite position, velocity, attitude
"""
if type(filelist) is str: filelist = [filelist]
if len(filelist) ==0: return None
# Open user block to read Collection_Short_Name
with h5py.File(filelist[0], 'r') as fn:
user_block_size = fn.userblock_size
with open(filelist[0], 'rU') as fs:
ub_text = fs.read(user_block_size)
ub_xml = etree.fromstring(ub_text.rstrip('\x00'))
#print(ub_text)
#print(etree.tostring(ub_xml))
CollectionName = ub_xml.find('Data_Product/N_Collection_Short_Name').text+'_All'
#print(CollectionName)
# read the data
geos = [h5py.File(filename, 'r') for filename in filelist]
if not ephemeris:
Latitude = np.concatenate([f['All_Data'][CollectionName]['Latitude'][:] for f in geos])
Longitude = np.concatenate([f['All_Data'][CollectionName]['Longitude'][:] for f in geos])
SatelliteAzimuthAngle = np.concatenate([f['All_Data'][CollectionName]['SatelliteAzimuthAngle'][:] for f in geos])
SatelliteRange = np.concatenate([f['All_Data'][CollectionName]['SatelliteRange'][:] for f in geos])
SatelliteZenithAngle = np.concatenate([f['All_Data'][CollectionName]['SatelliteZenithAngle'][:] for f in geos])
Height = np.concatenate([f['All_Data'][CollectionName]['Height'][:] for f in geos])
if hgt:
return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle, Height
else:
return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle
if ephemeris:
MidTime = np.concatenate([f['All_Data'][CollectionName]['MidTime'] [:] for f in geos])
SCPosition = np.concatenate([f['All_Data'][CollectionName]['SCPosition'][:] for f in geos])
SCVelocity = np.concatenate([f['All_Data'][CollectionName]['SCVelocity'][:] for f in geos])
SCAttitude = np.concatenate([f['All_Data'][CollectionName]['SCAttitude'][:] for f in geos])
return MidTime, SCPosition, SCVelocity, SCAttitude | 1b8bbd34651e13aabe752fa8e3ac8c6679d757ca | 3,985 |
def _in_terminal():
"""
Detect if Python is running in a terminal.
Returns
-------
bool
``True`` if Python is running in a terminal; ``False`` otherwise.
"""
# Assume standard Python interpreter in a terminal.
if "get_ipython" not in globals():
return True
ip = globals()["get_ipython"]()
# IPython as a Jupyter kernel.
if hasattr(ip, "kernel"):
return False
return True | 9716c2a1809f21ed8b827026d29b4ad69045f8d5 | 3,986 |
import re
def create_text_pipeline(documents):
"""
Create the full text pre-processing pipeline using spaCy that first cleans the texts
using the cleaning utility functions and then also removes common stopwords and corpus
specific stopwords. This function is used specifically on abstracts.
:param documents: A list of textual documents to pre-process.
:return cleaned_docs: Pre-processed textual documents.
"""
# Load all the documents into a spaCy pipe.
docs = nlp.pipe(documents, disable=["ner"])
cleaned_docs = []
# Lowercase + custom stopwords list + remove one character tokens + remove symbolical and punctuation tokens.
for doc in docs:
lowercased_sents_without_stops = []
for sent in doc.sents:
lowercased_lemmas_one_sent = []
for token in sent:
if not token.pos_ in {"SYM", "PUNCT"} \
and len(token) > 1 \
and not has_links(token.lower_) \
and not check_for_mostly_numeric_string(token.lower_) \
and not re.sub(r'[^\w\s]', '', token.lemma_) in CUSTOM_STOPS:
lowercased_lemmas_one_sent.append(token.lower_)
sentence = ' '.join(lowercased_lemmas_one_sent)
lowercased_sents_without_stops.append(sentence)
cleaned_docs.append([s for s in lowercased_sents_without_stops])
return cleaned_docs | d31632c7c1d9a2c85362e05ae43f96f35993a746 | 3,987 |
def giou_dist(tlbrs1, tlbrs2):
"""Computes pairwise GIoU distance."""
assert tlbrs1.ndim == tlbrs2.ndim == 2
assert tlbrs1.shape[1] == tlbrs2.shape[1] == 4
Y = np.empty((tlbrs1.shape[0], tlbrs2.shape[0]))
for i in nb.prange(tlbrs1.shape[0]):
area1 = area(tlbrs1[i, :])
for j in range(tlbrs2.shape[0]):
iou = 0.
area_union = area1 + area(tlbrs2[j, :])
iw = min(tlbrs1[i, 2], tlbrs2[j, 2]) - max(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
ih = min(tlbrs1[i, 3], tlbrs2[j, 3]) - max(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
if iw > 0 and ih > 0:
area_inter = iw * ih
area_union -= area_inter
iou = area_inter / area_union
ew = max(tlbrs1[i, 2], tlbrs2[j, 2]) - min(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
eh = max(tlbrs1[i, 3], tlbrs2[j, 3]) - min(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
area_encls = ew * eh
giou = iou - (area_encls - area_union) / area_encls
Y[i, j] = (1. - giou) * 0.5
return Y | 40dcd6b59f350f167ab8cf31be425e98671243d4 | 3,988 |
import datetime
def easter(date):
    """Calculate the date of Easter.
    Requires a datetime type object. Returns a datetime.date object with the
    date of Easter for the passed object's year.
    """
    if 1583 <= date.year < 10000:  # Delambre's method (Gregorian calendar)
        b = date.year // 100  # Take the first two digits of the year.
        h = (((19 * (date.year % 19) + b - (b // 4)) -
              ((b - ((b + 8) // 25) + 1) // 3) + 15) % 30)
        k = ((32 + 2 * (b % 4) + 2 * ((date.year % 100) // 4) - h -
              ((date.year % 100) % 4)) % 7)
        m = ((date.year % 19) + 11 * h + 22 * k) // 451
        return datetime.date(date.year, (h + k - 7 * m + 114) // 31,
                             ((h + k - 7 * m + 114) % 31) + 1)
    elif 1 <= date.year < 1583:  # Julian calendar
        d = (19 * (date.year % 19) + 15) % 30
        e = (2 * (date.year % 4) + 4 * (date.year % 7) - d + 34) % 7
        return datetime.date(date.year, (d + e + 114) // 31,
                             ((d + e + 114) % 31) + 1)
    else:  # Year outside the supported range
        raise ValueError("Invalid year: %d." % date.year) | 90bfaf56fb5164cdfb185f430ca11e7a5d9c2785 | 3,989 |
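A sanity check for the corrected easter above, using known Western Easter dates (31 March 2024, 20 April 2025):
import datetime

print(easter(datetime.date(2024, 6, 1)))  # 2024-03-31
print(easter(datetime.date(2025, 1, 1)))  # 2025-04-20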
from typing import Dict
def state_mahalanobis(od: Mahalanobis) -> Dict:
"""
Mahalanobis parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold,
'n_components': od.n_components,
'std_clip': od.std_clip,
'start_clip': od.start_clip,
'max_n': od.max_n,
'cat_vars': od.cat_vars,
'ohe': od.ohe,
'd_abs': od.d_abs,
'clip': od.clip,
'mean': od.mean,
'C': od.C,
'n': od.n}
return state_dict | 7be602c5a0c89d67adc223c911abccd96d359664 | 3,990 |
def show_table(table, **options):
"""
Displays a table without asking for input from the user.
:param table: a :class:`Table` instance
:param options: all :class:`Table` options supported, see :class:`Table` documentation for details
:return: None
"""
return table.show_table(**options) | ec040d4a68d2b3cb93493f336daf1aa63289756e | 3,991 |
def create_client(name, func):
"""Creating resources/clients for all needed infrastructure: EC2, S3, IAM, Redshift
Keyword arguments:
name -- the name of the AWS service resource/client
func -- the boto3 function object (e.g. boto3.resource/boto3.client)
"""
print("Creating client for", name)
return func(name,
region_name=DWH_REGION,
aws_access_key_id=KEY,
aws_secret_access_key=SECRET) | a688c36918ebb4bc76ee1594c6f4cca638587d7d | 3,992 |
def hamming(s0, s1):
"""
>>> hamming('ABCD', 'AXCY')
2
"""
assert len(s0) == len(s1)
return sum(c0 != c1 for c0, c1 in zip(s0, s1)) | efaba3e6aca8349b0dc5df575b937ba67a148d0e | 3,993 |
import pickle
def load_embeddings(topic):
"""
Load TSNE 2D Embeddings generated from fitting BlazingText on the news articles.
"""
print(topic)
embeddings = pickle.load(
open(f'covidash/data/{topic}/blazing_text/embeddings.pickle', 'rb'))
labels = pickle.load(
open(f'covidash/data/{topic}/blazing_text/labels.pickle', 'rb'))
if '</s>' in labels:
labels.remove('</s>')
embeddings = embeddings[:len(labels), :]
return embeddings, labels | de2f74c7e467e0f057c10a0bc15b79ee9eecb40f | 3,994 |
import shutil
import os
import sys
def get_EAC_macro_log(year,DOY,dest_path):
"""
Copy the EAC macro processor log
This gets the macro processor log which is created by the 'at' script
which starts the macro processor.
Notes
=====
This uses find_EAC_macro_log() to get the log names.
@param year : Year of observation
@param DOY : Day of observation
@param dest_path : Full path to the destination directory.
@return: list
EAC macro processor logs copied.
"""
print("Entered get_EAC_macro_log for",year,DOY,dest_path)
pm_logs = find_EAC_macro_log(year,DOY)
if pm_logs!= None:
# We found one or more logs
for f in pm_logs:
try:
shutil.copy(f,dest_path)
print(os.path.basename(f),"copied to",dest_path)
except:
print("Could not copy",os.path.basename(f),'because', sys.exc_info()[0])
return pm_logs | 2cc91ef42eef883f35917b41a29a9578fbfc6fa8 | 3,995 |
def mosaic_cut(image, original_width, original_height, width, height, center,
ptop, pleft, pbottom, pright, shiftx, shifty):
"""Generates a random center location to use for the mosaic operation.
Given a center location, cuts the input image into a slice that will be
concatenated with other slices with the same center in order to construct
a final mosaicked image.
Args:
image: `Tensor` of shape [None, None, 3] that needs to be altered.
original_width: `float` value indicating the original width of the image.
original_height: `float` value indicating the original height of the image.
width: `float` value indicating the final width of the image.
height: `float` value indicating the final height of the image.
center: `float` value indicating the desired center of the final patched
image.
ptop: `float` value indicating the top of the image without padding.
pleft: `float` value indicating the left of the image without padding.
pbottom: `float` value indicating the bottom of the image without padding.
pright: `float` value indicating the right of the image without padding.
shiftx: `float` 0.0 or 1.0 value indicating if the image is on the left or
right.
shifty: `float` 0.0 or 1.0 value indicating if the image is at the top or
bottom.
Returns:
image: The cropped image in the same datatype as the input image.
crop_info: `float` tensor that is applied to the boxes in order to select
the boxes still contained within the image.
"""
def cast(values, dtype):
return [tf.cast(value, dtype) for value in values]
with tf.name_scope('mosaic_cut'):
center = tf.cast(center, width.dtype)
zero = tf.cast(0.0, width.dtype)
cut_x, cut_y = center[1], center[0]
# Select the crop of the image to use
left_shift = tf.minimum(
tf.minimum(cut_x, tf.maximum(zero, -pleft * width / original_width)),
width - cut_x)
top_shift = tf.minimum(
tf.minimum(cut_y, tf.maximum(zero, -ptop * height / original_height)),
height - cut_y)
right_shift = tf.minimum(
tf.minimum(width - cut_x,
tf.maximum(zero, -pright * width / original_width)), cut_x)
bot_shift = tf.minimum(
tf.minimum(height - cut_y,
tf.maximum(zero, -pbottom * height / original_height)),
cut_y)
(left_shift, top_shift, right_shift, bot_shift,
zero) = cast([left_shift, top_shift, right_shift, bot_shift, zero],
tf.float32)
# Build a crop offset and a crop size tensor to use for slicing.
crop_offset = [zero, zero, zero]
crop_size = [zero - 1, zero - 1, zero - 1]
if shiftx == 0.0 and shifty == 0.0:
crop_offset = [top_shift, left_shift, zero]
crop_size = [cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 0.0:
crop_offset = [top_shift, cut_x - right_shift, zero]
crop_size = [cut_y, width - cut_x, zero - 1]
elif shiftx == 0.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, left_shift, zero]
crop_size = [height - cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, cut_x - right_shift, zero]
crop_size = [height - cut_y, width - cut_x, zero - 1]
# Contain and crop the image.
ishape = tf.cast(tf.shape(image)[:2], crop_size[0].dtype)
crop_size[0] = tf.minimum(crop_size[0], ishape[0])
crop_size[1] = tf.minimum(crop_size[1], ishape[1])
crop_offset = tf.cast(crop_offset, tf.int32)
crop_size = tf.cast(crop_size, tf.int32)
image = tf.slice(image, crop_offset, crop_size)
crop_info = tf.stack([
tf.cast(ishape, tf.float32),
tf.cast(tf.shape(image)[:2], dtype=tf.float32),
tf.ones_like(ishape, dtype=tf.float32),
tf.cast(crop_offset[:2], tf.float32)
])
return image, crop_info | 2874ea65a695d7ebebf218e5a290069a9f3c1e8e | 3,996 |
import requests
def get_children_info(category_id: str) -> list[dict]:
"""Get information about children categories of the current category.
:param: category_id: category id.
:return: info about children categories.
"""
# Create the URL
url = f'{POINT}/resources/v2/title/domains/{DOMAIN}/' \
f'categories/{category_id}/children'
# Request
response = requests.get(url, params=REQUEST_PARAMS,
headers=REQUEST_HEADERS)
# If error
if not response:
# Raise exception to retry request by decorator
raise RequestException()
# Extract data
children_data = response.json().get('data')
if children_data:
return children_data['categories']
return [] | f5a651c1f58c75ee56d1140ee41dc6dd39570f88 | 3,997 |
from datetime import datetime
def GetTypedValue(field_type, value):
"""Returns a typed value based on a schema description and string value.
BigQuery's Query() method returns a JSON string that has all values stored
as strings, though the schema contains the necessary type information. This
method provides conversion services to make it easy to persist the data in
your JSON as "typed" data.
Args:
field_type: The field type (as defined by BigQuery).
value: The field value, typed as a string.
Returns:
A value of the appropriate type.
Raises:
NotSupportedError: Raised if the field type is not supported.
"""
if value is None:
return None
if field_type == FieldTypes.STRING:
return value
if field_type == FieldTypes.INTEGER:
if value == 'NaN':
return None
else:
return int(value)
if field_type == FieldTypes.FLOAT:
if value == 'NaN':
return None
else:
return float(value)
if field_type == FieldTypes.TIMESTAMP:
if value == 'NaN':
return None
else:
dt = datetime.datetime.utcfromtimestamp(float(value))
return dt.isoformat(' ')
if field_type == FieldTypes.BOOLEAN:
return value.lower() == 'true'
else:
raise NotSupportedError(
'Type {field_type} is not supported.'.format(field_type=field_type)) | 8e6198d089bae4e1044b2998da97a8cbcf6130b2 | 3,998 |
def predict_from_file(audio_file,
hop_length=None,
fmin=50.,
fmax=MAX_FMAX,
model='full',
decoder=torchcrepe.decode.viterbi,
return_harmonicity=False,
return_periodicity=False,
batch_size=None,
device='cpu',
pad=True):
"""Performs pitch estimation from file on disk
Arguments
audio_file (string)
The file to perform pitch tracking on
hop_length (int)
The hop_length in samples
fmin (float)
The minimum allowable frequency in Hz
fmax (float)
The maximum allowable frequency in Hz
model (string)
The model capacity. One of 'full' or 'tiny'.
decoder (function)
The decoder to use. See decode.py for decoders.
return_harmonicity (bool) [DEPRECATED]
Whether to also return the network confidence
return_periodicity (bool)
Whether to also return the network confidence
batch_size (int)
The number of frames per batch
device (string)
The device used to run inference
pad (bool)
Whether to zero-pad the audio
Returns
pitch (torch.tensor [shape=(1, 1 + int(time // hop_length))])
(Optional) periodicity (torch.tensor
[shape=(1, 1 + int(time // hop_length))])
"""
# Load audio
audio, sample_rate = torchcrepe.load.audio(audio_file)
# Predict
return predict(audio,
sample_rate,
hop_length,
fmin,
fmax,
model,
decoder,
return_harmonicity,
return_periodicity,
batch_size,
device,
pad) | 7e1f8036e5d0506f28a4b36b9e23c2d4a0237218 | 3,999 |