content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def wilson_primality_test(n: int) -> bool:
"""
https://en.wikipedia.org/wiki/Wilson%27s_theorem
>>> assert all(wilson_primality_test(i) for i in [2, 3, 5, 7, 11])
    >>> assert not any(wilson_primality_test(i) for i in [4, 6, 8, 9, 10])
"""
return ((factorial_lru(n - 1) + 1) % n) == 0
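# Hedged usage sketch: `factorial_lru` is not defined in this snippet, so a cached
# factorial is assumed here as a plausible stand-in.
from functools import lru_cache

@lru_cache(maxsize=None)
def factorial_lru(n: int) -> int:
    return 1 if n < 2 else n * factorial_lru(n - 1)

assert all(wilson_primality_test(p) for p in [2, 3, 5, 7, 11])
assert not any(wilson_primality_test(c) for c in [4, 6, 8, 9, 10])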
|
809415a5bd5a4ee4c19cc41a4616e91e17574a09
| 18,100 |
def project_from_id(request):
"""
Given a request returns a project instance or throws
APIUnauthorized.
"""
try:
pm = ProjectMember.objects.get(
user=request.user,
project=request.GET['project_id'],
)
except ProjectMember.DoesNotExist:
raise APIUnauthorized()
return pm.project
|
d23daddfacf736b835bdd10594d99dd4d4e5a0fe
| 18,101 |
from typing import Union
import logging
from typing import Optional
from typing import Tuple
import os
def addRotatingFileHandler(logger: Union[logging.Logger, str],
fName: Optional[str] = None,
dirPath: Optional[str] = None,
fmt: Optional[logging.Formatter] = None,
level: int = logging.DEBUG,
maxSizeKb: int = DEFAULT_ROT_LOG_FILE_SIZE_KB,
backupCount: int = DEFAULT_ROT_LOG_FILE_COUNT) -> Tuple[
logging.handlers.RotatingFileHandler,
str]:
""" Add rotating file handler to logger instance.
Args:
logger: logger instance or logger name.
fName: name of a log file. If there is no file extension, default
``DEFAULT_LOG_FILE_EXT`` is appended. If ``None``, logger name
is used as a file name.
dirPath: path to a folder where logs will be stored. If ``None``,
path is fetched with :func:`getDefaultLogDirPath()`. If log
folder does not exist, it is created.
maxSizeKb: number of KB at which rollover is performed on a
current log file.
backupCount: number of files to store (if file with given name
already exists).
fmt: Optional custom formatter for created handler. By default,
DEFAULT_FORMATTER and DEFAULT_FORMATTER_TIME is used.
level: Log level for this specific handler. By default,
everything is logged (``DEBUG`` level).
Returns:
A tuple: (created rotating file handler, file path).
"""
logger = _getLogger(logger)
fName = getFileName(logger, fName)
if dirPath is None:
dirPath = getDefaultLogDirPath() # pragma: no cover
else:
dirPath = os.path.normpath(dirPath)
dlpt.pth.createFolder(dirPath)
fPath = os.path.join(dirPath, fName)
if fmt is None: # pragma: no cover
fmt = logging.Formatter(DEFAULT_FMT,
datefmt=DEFAULT_FMT_TIME)
hdlr = logging.handlers.RotatingFileHandler(
fPath,
maxBytes=int(maxSizeKb * 1e3),
backupCount=backupCount)
hdlr.setLevel(level)
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
return (hdlr, fPath)
|
3d8acc71a4be116cbf075ecc20d5221e48b127fd
| 18,102 |
def make_beampipe_from_end(pipe_aperture, pipe_length, loc=(0, 0, 0), rotation_angles=(0, 0, 0)):
"""Takes an aperture and creates a pipe.
The centre of the face of aperture1 will be at loc and rotations will happen
about that point.
Assumes the aperture is initially centred on (0,0,0)
Args:
pipe_aperture (FreeCad wire): Outline of aperture.
pipe_length (float): Length of pipe.
        loc (tuple): The coordinates of the final location of the
centre of the pipe.
rotation_angles (tuple) : The angles to rotate about in the three
cartesian directions.
Returns:
p (FreeCad shape): A model of the pipe.
"""
p = pipe_aperture.extrude(Base.Vector(pipe_length, 0, 0))
p.rotate(
Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), rotation_angles[2]
) # Rotate around Z
p.rotate(
Base.Vector(0, 0, 0), Base.Vector(1, 0, 0), rotation_angles[0]
) # Rotate around X
p.rotate(
Base.Vector(0, 0, 0), Base.Vector(0, 1, 0), rotation_angles[1]
) # Rotate around Y
p.translate(Base.Vector(loc[0], loc[1], loc[2])) # Move to be centred on loc
return p
|
a3b85995165ac2b11d9d6d0014b68356996e97b9
| 18,103 |
import urllib
def encode_string(value):
"""Replace and encode all special characters in the passed string.
Single quotation marks need to be doubled. Therefore, if the string contains a single
quotation mark, it is going to be replaced by a pair of such quotation marks.
"""
value = value.replace('\'', '\'\'')
return urllib.parse.quote(value, safe='')
|
a6e30e834eb9b4d1d5882b7b24eec0da28ed5f4c
| 18,104 |
async def make_json_photo():
"""Photo from web camera in base64.
"""
img, _ = get_png_photo()
if img:
result = {"image": png_img_to_base64(img)}
else:
result = {"error": "Camera not available"}
return result
|
d498d8d47a995125f0480c069b1459156875f2b7
| 18,105 |
def near_field_point_matching(source, position, size, k, lmax, sampling):
"""Decompose a source into VSHs using the point matching method in the near field
Returns p_src[2,rmax]
Arguments:
source source object
position position around which to decompose
size size of xy planar region to perform point matching over
k medium wavenumber
lmax maximum number of multipoles
sampling number of sampling points along a dimension
"""
points = sample_plane_point_matching(position, size, sampling)
Npoints = points.shape[1]
X = points[0]
Y = points[1]
Z = points[2]
RAD, THETA, PHI = coordinates.cart_to_sph(X, Y, Z + 1e-9, origin=position)
rmax = vsh.lmax_to_rmax(lmax)
E_src = source.E_field(X, Y, Z, k)[:2]
H_src = source.H_field(X, Y, Z, k)[:2]
# TODO: is this true?
# H_src = E_src[::-1]
E_vsh = np.zeros([2, Npoints, 2, rmax], dtype=complex)
for i,n,m in vsh.mode_indices(lmax):
Nfunc, Mfunc = vsh.VSH(n, m, mode=vsh.vsh_mode.incident)
Emn_val = vsh.Emn(m, n)
E_vsh[...,0,i] = -1j*Emn_val*coordinates.vec_sph_to_cart(Nfunc(RAD, THETA, PHI, k), THETA, PHI)[:2]
E_vsh[...,1,i] = -1j*Emn_val*coordinates.vec_sph_to_cart(Mfunc(RAD, THETA, PHI, k), THETA, PHI)[:2]
H_vsh = -1j*E_vsh[...,::-1,:]
column = np.concatenate([E_src.reshape([-1]), H_src.reshape([-1])])
matrix = np.concatenate([E_vsh.reshape([-1, 2*rmax]), H_vsh.reshape([-1, 2*rmax])])
sol = np.linalg.lstsq(matrix, column, rcond=None)
p_src = sol[0]
return np.reshape(p_src, [2,rmax])
|
32772b6cc15ea4f96a2356969118e3e339bb9382
| 18,106 |
from typing import Any
from typing import Union
from typing import List
from typing import Dict
from typing import Type
def Option(
default: Any = MISSING,
*,
name: str = MISSING,
description: str = MISSING,
required: bool = MISSING,
choices: Union[List[Union[str, int, float]], Dict[str, Union[str, int, float]]] = MISSING,
min: int = MISSING,
max: int = MISSING,
type: Type[Any] = MISSING,
cls: Type[Any] = __option.OptionClass
) -> Any:
"""Interaction option, should be set as a default to a parameter.
The `cls` parameter can be used if you want to use a custom Option
class, you can use `functools.partial()` as to not repeat the kwarg.
Parameters:
default:
Default value when the option is not passed, makes the option
optional so that it can be omitted.
name:
Name of the option in the Discord client. By default it uses
the name of the parameter.
description: Description of the option.
required:
Whether the option can be omitted. If a default is passed this is
automatically set implicitly.
choices: Set choices that the user can pick from in the Discord client.
min: Smallest number that can be entered for number types.
max: Biggest number that can be entered for number types.
type:
The type of the option, overriding the annotation. This can be
a `ApplicationCommandOption` value or any type.
cls: The class to use, defaults to `OptionClass`.
Returns:
The `cls` parameter (`OptionClass` by default) disguised as
`typing.Any`. This way this function can be used as a default without
violating static type checkers.
"""
return cls(
default, name=name, description=description,
required=required, choices=choices,
min=min, max=max, type=type
)
|
a344edbcdbc4211adebd072f0daaf20a6abc657e
| 18,107 |
def inverse(f, a, b, num_iters=64):
"""
For a function f that is monotonically increasing on the interval (a, b),
returns the function f^{-1}
"""
if a >= b:
raise ValueError(f"Invalid interval ({a}, {b})")
def g(y):
if y > f(b) or y < f(a):
raise ValueError(f"Invalid image ({y})")
lower = a
upper = b
for _ in range(num_iters):
mid = average(lower, upper)
if f(mid) < y:
lower = mid
elif f(mid) > y:
upper = mid
else:
return mid
return mid
return g
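# Hedged usage sketch: `average` is not defined in this snippet; the arithmetic
# mean is assumed here purely for illustration.
def average(a, b):
    return (a + b) / 2

cube_root = inverse(lambda x: x ** 3, 0.0, 10.0)
print(cube_root(27.0))  # ~3.0 after 64 bisection steps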
|
5d0b3c990d20d486f70bff2a5569920134d71ea1
| 18,108 |
def gmof(x, sigma):
"""
Geman-McClure error function
"""
x_squared = x ** 2
sigma_squared = sigma ** 2
return (sigma_squared * x_squared) / (sigma_squared + x_squared)
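# Quick numeric check with plain floats: the Geman-McClure residual saturates
# towards sigma**2 for large |x|.
print(gmof(0.0, 1.0))    # 0.0
print(gmof(1.0, 1.0))    # 0.5
print(gmof(100.0, 1.0))  # ~0.9999 (close to sigma**2 = 1)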
|
63448c03e826874df1c6c10f053e1b1e917b6a98
| 18,109 |
from tqdm import tqdm
def download_data(vars):
"""
function to download data from the ACS website
:param:
        vars (string): a file name that holds 3-tuples of the variables
        (in the format returned by censusdata.search()),
        where the first element is the variable id and the second is the variable header.
        The geographical granularity is taken from the module-level ``geo_level_name``.
:return:
a pandas.DataFrame object
"""
gl = geoLevel(geo_level_name)
print(f"Getting {gl.name} level geographies...")
geographies = get_censusgeos(gl)
vars, headers = get_variables(vars)
data = []
print("Downloading selected variables for these geographies...")
for geo in tqdm(geographies):
local_data = censusdata.download(data_source, year, geo, vars, tabletype=tabletype, key=API_KEY)
data.append(local_data)
data = pd.concat(data)
data.columns = headers
data = fix_index(data)
return data
|
a333eb2565736a6509cc3760de35fae8bc020c5e
| 18,110 |
def oddify(n):
"""Ensure number is odd by incrementing if even
"""
return n if n % 2 else n + 1
|
dee98063cb904cf462792d15129bd90a4b50bd28
| 18,111 |
from typing import List
import re
def method_matching(pattern: str) -> List[str]:
"""Find all methods matching the given regular expression."""
_assert_loaded()
regex = re.compile(pattern)
return sorted(filter(lambda name: re.search(regex, name), __index.keys()))
|
b4a4b1effcd2359e88022b28254ed247724df184
| 18,112 |
def update_binwise_positions(cnarr, segments=None, variants=None):
"""Convert start/end positions from genomic to bin-wise coordinates.
Instead of chromosomal basepairs, the positions indicate enumerated bins.
Revise the start and end values for all GenomicArray instances at once,
where the `cnarr` bins are mapped to corresponding `segments`, and
`variants` are grouped into `cnarr` bins as well -- if multiple `variants`
rows fall within a single bin, equally-spaced fractional positions are used.
Returns copies of the 3 input objects with revised `start` and `end` arrays.
"""
cnarr = cnarr.copy()
if segments:
segments = segments.copy()
seg_chroms = set(segments.chromosome.unique())
if variants:
variants = variants.copy()
var_chroms = set(variants.chromosome.unique())
# ENH: look into pandas groupby innards to get group indices
for chrom in cnarr.chromosome.unique():
# Enumerate bins, starting from 0
# NB: plotted points will be at +0.5 offsets
c_idx = (cnarr.chromosome == chrom)
c_bins = cnarr[c_idx]#.copy()
if segments and chrom in seg_chroms:
# Match segment boundaries to enumerated bins
c_seg_idx = (segments.chromosome == chrom).values
seg_starts = np.searchsorted(c_bins.start.values,
segments.start.values[c_seg_idx])
seg_ends = np.r_[seg_starts[1:], len(c_bins)]
segments.data.loc[c_seg_idx, "start"] = seg_starts
segments.data.loc[c_seg_idx, "end"] = seg_ends
if variants and chrom in var_chroms:
# Match variant positions to enumerated bins, and
# add fractional increments to multiple variants within 1 bin
c_varr_idx = (variants.chromosome == chrom).values
c_varr_df = variants.data[c_varr_idx]
# Get binwise start indices of the variants
v_starts = np.searchsorted(c_bins.start.values,
c_varr_df.start.values)
# Overwrite runs of repeats with fractional increments,
# adding the cumulative fraction to each repeat
for idx, size in list(get_repeat_slices(v_starts)):
v_starts[idx] += np.arange(size) / size
variant_sizes = c_varr_df.end - c_varr_df.start
variants.data.loc[c_varr_idx, "start"] = v_starts
variants.data.loc[c_varr_idx, "end"] = v_starts + variant_sizes
c_starts = np.arange(len(c_bins)) # c_idx.sum())
c_ends = np.arange(1, len(c_bins) + 1)
cnarr.data.loc[c_idx, "start"] = c_starts
cnarr.data.loc[c_idx, "end"] = c_ends
return cnarr, segments, variants
|
f42780517cde35d2297620dcaf046ea0a111a7b9
| 18,113 |
async def respond_wrong_author(
ctx: InteractionContext, author_must_be: Member | SnakeBotUser, hidden: bool = True
) -> bool:
"""Respond to the given context"""
if not ctx.responded:
await ctx.send(
ephemeral=hidden,
embeds=embed_message(
"Error",
f"The author of the message must be {author_must_be.mention}\nPlease try again",
),
)
return True
return False
|
a39e3672dd639e0183beb30c6ebfec324dfc96de
| 18,114 |
def ParseFloatingIPTable(output):
"""Returns a list of dicts with floating IPs."""
keys = ('id', 'ip', 'instance_id', 'fixed_ip', 'pool',)
floating_ip_list = ParseNovaTable(output, FIVE_COLUMNS_PATTERN, keys)
for floating_ip in floating_ip_list:
if floating_ip['instance_id'] == '-':
floating_ip['instance_id'] = None
if floating_ip['fixed_ip'] == '-':
floating_ip['fixed_ip'] = None
return floating_ip_list
|
691d9c0525cee5f4b6b9c56c4e21728c24e46f48
| 18,115 |
import os
def build_entity_bucket(config, server):
"""プロジェクト構成ファイルからエンティティバケットを構築するファクトリ関数
Args:
config (str): ファイル名
server (str): サーバ名
Returns:
(Server, EntityBucket): サーバとエンティティバケットを返す
"""
server_, entity_bucket = Parser().parse(config, server, context=os.environ)
return server_, entity_bucket
|
f3b3ac3b32760922c9e61e162db5688e06132836
| 18,116 |
def test_logging_to_progress_bar_with_reserved_key(tmpdir):
""" Test that logging a metric with a reserved name to the progress bar raises a warning. """
class TestModel(BoringModel):
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
self.log("loss", output["loss"], prog_bar=True)
return output
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=2,
)
with pytest.warns(UserWarning, match="The progress bar already tracks a metric with the .* 'loss'"):
trainer.fit(model)
|
827719942bed424def0753af9c4b6757b5f6cdf0
| 18,117 |
def cdf_inverse(m, alpha, capacity, f, subint):
"""
This function computes the inverse value of a specific probability for
a given distribution.
Args:
m (mesh): The initial mesh.
alpha (float): The probability for which the inverse value is computed.
capacity (float): The capacity of the power generation for each hour
of interest.
f (pdf): The distribution of the random variable (X1+...+Xn), which has
to be a pdf.
        subint (int): The number of subintervals, which are used to
            interpolate the cdf.
    Returns:
        inverse_alpha (float): The computed inverse value of alpha.
"""
x = np.linspace(0, capacity, subint)
y = []
for i in x:
yi = multi_cdf(m, i, capacity, f)
j = int(np.argwhere(x==i))
y.append(yi)
if (j == 0) and (yi > alpha):
inverse_alpha = 0
break
elif (j != 0):
if y[j-1] <= alpha <= y[j]:
lin = interp1d([y[j-1], y[j]], [x[j-1], x[j]])
inverse_alpha = lin(alpha)
break
else:
inverse_alpha = capacity
return inverse_alpha
|
cb0609a1f5049a910aaec63b9d6ed311a1fdc263
| 18,118 |
def concatenation(clean_list):
"""
Concatenation example.
Takes the processed list for your emails and concatenates any elements that are currently separate that you may
wish to have as one element, such as dates.
    E.g. ['19', 'Feb', '2018'] becomes ['19 Feb 2018']
Works best if the lists are similar as it works by using the index of an element and joining it to other elements
using a positive or negative index.
"""
index_of_item = clean_list.index("your chosen item")
clean_list[:index_of_item] = [' '.join(clean_list[:index_of_item])] # joins together every element from start to the index of the item
# to join elements mid-list:
another_index = clean_list.index("another item") # date concatenation
date_start = another_index - 3
date_end = another_index
clean_list[date_start:date_end] = [' '.join(clean_list[date_start:date_end])] # joins the 3 elements before 'another item' index
return clean_list
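# Hedged usage sketch with an invented token list; "your chosen item" and
# "another item" are the placeholders from the docstring, not real data.
tokens = ["From", "Alice", "your chosen item",
          "19", "Feb", "2018", "another item", "body"]
print(concatenation(tokens))
# ['From Alice', 'your chosen item', '19 Feb 2018', 'another item', 'body']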
|
59b727f21e663f2836f6fe939f4979e9f7484f62
| 18,119 |
def add_pattern_bd(x, distance=2, pixel_value=1):
"""
    Augments a matrix by setting a checkerboard-like pattern of values some `distance` away from the bottom-right
edge to 1. Works for single images or a batch of images.
:param x: N X W X H matrix or W X H matrix. will apply to last 2
:type x: `np.ndarray`
:param distance: distance from bottom-right walls. defaults to 2
:type distance: `int`
:param pixel_value: Value used to replace the entries of the image matrix
:type pixel_value: `int`
:return: augmented matrix
:rtype: np.ndarray
"""
x = np.array(x)
shape = x.shape
if len(shape) == 3:
width, height = x.shape[1:]
x[:, width - distance, height - distance] = pixel_value
x[:, width - distance - 1, height - distance - 1] = pixel_value
x[:, width - distance, height - distance - 2] = pixel_value
x[:, width - distance - 2, height - distance] = pixel_value
elif len(shape) == 2:
width, height = x.shape
x[width - distance, height - distance] = pixel_value
x[width - distance - 1, height - distance - 1] = pixel_value
x[width - distance, height - distance - 2] = pixel_value
x[width - distance - 2, height - distance] = pixel_value
else:
raise RuntimeError('Do not support numpy arrays of shape ' + str(shape))
return x
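# Usage sketch on an 8x8 zero image: the function marks four pixels near the
# bottom-right corner with `pixel_value`.
import numpy as np

img = np.zeros((8, 8))
marked = add_pattern_bd(img, distance=2, pixel_value=1)
print(int(marked.sum()))  # 4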
|
1f545a472d6d25f23922c133fb0ef0d11307cca1
| 18,120 |
import sys
import hashlib
def calc_md5_sign(secret, parameters):
"""
    Compute the MD5 sign from app_secret and the parameter string; parameters may be a dict (recommended) or a str.
:param secret: str
:param parameters:
:return:
"""
if hasattr(parameters, "items"):
keys = list(parameters.keys())
keys.sort()
parameters_str = "%s%s%s" % (secret,
''.join('%s%s' % (key, parameters[key]) for key in keys),
secret)
else:
parameters_str = parameters
    if sys.version_info >= (3, 0):  # Python 3 has built-in unicode support; just encode
parameters_str = parameters_str.encode(encoding='utf-8')
    else:  # Python 2 still needs to handle unicode explicitly
parameters_str = mixStr_py2(parameters_str)
sign_hex = hashlib.md5(parameters_str).hexdigest().upper()
return sign_hex
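# Usage sketch on Python 3: dict parameters are sorted by key and concatenated
# between two copies of the secret before hashing.
print(calc_md5_sign("secret", {"b": 2, "a": 1}))
# -> MD5 of "secreta1b2secret", as uppercase hex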
|
f13cd469a86942c011f3d419a4a2cf89c79cf2df
| 18,121 |
def tokenize(string):
"""
Scans the entire message to find all Content-Types and boundaries.
"""
tokens = deque()
for m in _RE_TOKENIZER.finditer(string):
if m.group(_CTYPE):
name, token = parsing.parse_header(m.group(_CTYPE))
elif m.group(_BOUNDARY):
token = Boundary(m.group(_BOUNDARY).strip("\t\r\n"),
_grab_newline(m.start(), string, -1),
_grab_newline(m.end(), string, 1))
else:
token = _EMPTY_LINE
tokens.append(token)
return _filter_false_tokens(tokens)
|
0121f9242a5af4611edc2fd28b8af65c5b09078d
| 18,122 |
def query(obj,desc=None):
"""create a response to 'describe' cmd from k8s pod desc and optional custom properties desc """
    # This is a simplified version compared to what the k8s servo has (single container only);
    # if we change it to multiple containers, they will become the app's components
    # (here the app is a single pod, unlike servo-k8s, where 'app' = k8s deployment).
if not desc:
desc = {"application":{}}
elif not desc.get("application"):
desc["application"] = {}
comps = desc["application"].setdefault("components", {})
c = obj["spec"]["containers"][0]
cn = c["name"]
comp=comps.setdefault(cn, {})
settings = comp.setdefault("settings", {})
r = c.get("resources")
if r:
settings["mem"] = numval(memunits(r.get("limits",{}).get("memory","0")), 0, MAX_MEM, MEM_STEP) # (value,min,max,step)
settings["cpu"] = numval(cpuunits(r.get("limits",{}).get("cpu","0")), 0, MAX_CPU, CPU_STEP) # (value,min,max,step)
for ev in c.get("env",[]):
# skip env vars that match the pre-defined setting names above
if ev["name"] in ("mem","cpu","replicas"):
continue
if ev["name"] in settings:
s = settings[ev["name"]]
if s.get("type", "linear") == "linear":
try:
s["value"] = float(ev["value"])
except ValueError:
raise ConfigError("invalid value found in environment {}={}, it was expected to be numeric".format(ev["name"],ev["value"]))
else:
s["value"] = ev["value"]
return desc
|
bce425c503c3c779c6f397020061ccee3150b562
| 18,123 |
def binary_cross_entropy(preds, targets, name=None):
"""Computes binary cross entropy given `preds`.
    For brevity, let `x = preds`, `z = targets`. The logistic loss is
        loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
name: A name
"""
eps = 1e-12
with ops.op_scope([preds, targets], name, "bce_loss"):
preds = ops.convert_to_tensor(preds, name="preds")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(targets * tf.log(preds + eps) +
(1. - targets) * tf.log(1. - preds + eps)))
|
f16441fe921b550986604c2c7513d9737fc230b3
| 18,124 |
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
import os
import json
import requests
import base64
import time
def upload_and_rec_beauty(request):
"""
upload and recognize image
:param request:
:return:
"""
image_dir = 'cv/static/FaceUpload'
if not os.path.exists(image_dir):
os.makedirs(image_dir)
result = {}
imagepath = ''
if request.method == "POST":
image = request.FILES.get("image", None)
if not isinstance(image, InMemoryUploadedFile) and not isinstance(image, TemporaryUploadedFile):
imgstr = request.POST.get("image", None)
if imgstr is None or imgstr.strip() == '':
result['code'] = 1
result['msg'] = 'Invalid Image'
result['data'] = None
result['elapse'] = 0
json_result = json.dumps(result, ensure_ascii=False)
return HttpResponse(json_result)
elif 'http://' in imgstr or 'https://' in imgstr:
response = requests.get(imgstr)
image = np.asarray(bytearray(response.content), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
else:
img_base64 = base64.b64decode(imgstr)
                image = np.frombuffer(img_base64, dtype=np.uint8)
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
else:
if image is None:
result['code'] = 1
result['msg'] = 'Invalid Image'
result['data'] = None
result['elapse'] = 0
json_result = json.dumps(result, ensure_ascii=False)
return HttpResponse(json_result)
destination = open(os.path.join(image_dir, image.name), 'wb+')
for chunk in image.chunks():
destination.write(chunk)
destination.close()
imagepath = URL_PORT + '/static/FaceUpload/' + image.name
image = 'cv/static/FaceUpload/' + image.name
tik = time.time()
res = beauty_recognizer.infer(image)
if len(res['mtcnn']) > 0:
result['code'] = 0
result['msg'] = 'Success'
result['data'] = {
'imgpath': imagepath,
'beauty': round(res['beauty'], 2),
'detection': res['mtcnn']
}
result['elapse'] = round(time.time() - tik, 2)
else:
result['code'] = 3
result['msg'] = 'None face is detected'
result['data'] = []
result['elapse'] = round(time.time() - tik, 2)
json_str = json.dumps(result, ensure_ascii=False)
return HttpResponse(json_str)
else:
result['code'] = 2
result['msg'] = 'invalid HTTP method'
result['data'] = None
json_result = json.dumps(result, ensure_ascii=False)
return HttpResponse(json_result)
|
0088f8bc5ddaba461ea74a4d156c577b231cab96
| 18,125 |
def weld_standard_deviation(array, weld_type):
"""Returns the *sample* standard deviation of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation.
"""
weld_obj_var = weld_variance(array, weld_type)
obj_id, weld_obj = create_weld_object(weld_obj_var)
weld_obj_var_id = get_weld_obj_id(weld_obj, weld_obj_var)
weld_template = _weld_std_code
weld_obj.weld_code = weld_template.format(var=weld_obj_var_id)
return weld_obj
|
763b96ef9efa36f7911e50b313bbc29489a5d5bd
| 18,126 |
def dev_to_abs_pos(dev_pos):
"""
When device position is 30000000, absolute position from home is 25mm
factor = 30000000/25
"""
global CONVFACTOR
abs_pos = dev_pos*(1/CONVFACTOR)
return abs_pos
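# Hedged usage sketch: CONVFACTOR is a module-level global that this snippet does
# not define; the docstring implies 30000000 device units == 25 mm.
CONVFACTOR = 30000000 / 25
print(dev_to_abs_pos(30000000))  # 25.0 (mm)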
|
74800f07cdb92b7fdf2ec84dfc606195fceef86b
| 18,127 |
import torch
def model_predict(model, test_loader, device):
"""
Predict data in dataloader using model
"""
# Set model to eval mode
model.eval()
# Predict without computing gradients
with torch.no_grad():
y_preds = []
y_true = []
for inputs, labels in test_loader:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
y_preds.append(preds)
y_true.append(labels)
y_preds = torch.cat(y_preds).tolist()
y_true = torch.cat(y_true).tolist()
return y_preds, y_true
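# Hedged usage sketch with an invented toy model and in-memory data, just to show
# the expected inputs and outputs of model_predict.
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cpu")
toy_model = nn.Linear(4, 3)
toy_loader = DataLoader(
    TensorDataset(torch.randn(16, 4), torch.randint(0, 3, (16,))),
    batch_size=8)
preds, true = model_predict(toy_model, toy_loader, device)
print(len(preds), len(true))  # 16 16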
|
0b43a28046c1de85711f7db1b3e64dfd95f11905
| 18,128 |
def calc_precision_recall(frame_results):
"""Calculates precision and recall from the set of frames by summing the true positives,
false positives, and false negatives for each frame.
Args:
frame_results (dict): dictionary formatted like:
{
'frame1': {'true_pos': int, 'false_pos': int, 'false_neg': int},
'frame2': ...
...
}
Returns:
tuple: of floats of (precision, recall)
"""
true_pos = 0
false_pos = 0
false_neg = 0
for _, res in frame_results.items():
true_pos += res["true_pos"]
false_pos += res["false_pos"]
false_neg += res["false_neg"]
try:
precision = true_pos / (true_pos + false_pos)
except ZeroDivisionError:
precision = 0.0
try:
recall = true_pos / (true_pos + false_neg)
except ZeroDivisionError:
recall = 0.0
return precision, recall
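# Usage sketch with two invented frames: precision = 8/(8+3), recall = 8/(8+4).
frames = {
    "frame1": {"true_pos": 5, "false_pos": 1, "false_neg": 2},
    "frame2": {"true_pos": 3, "false_pos": 2, "false_neg": 2},
}
precision, recall = calc_precision_recall(frames)
print(round(precision, 3), round(recall, 3))  # 0.727 0.667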
|
7389050a73a1e368222941090991883f6c6a89b7
| 18,129 |
import logging
def patch_fixture(
scope="function",
services=None,
autouse=False,
docker_client=None,
region_name=constants.DEFAULT_AWS_REGION,
kinesis_error_probability=0.0,
dynamodb_error_probability=0.0,
container_log_level=logging.DEBUG,
localstack_verison="latest",
auto_remove=True,
pull_image=True,
container_name=None,
**kwargs
):
"""Create a pytest fixture that temporarially redirects all botocore
sessions and clients to a Localstack container.
This is not a fixture! It is a factory to create them.
The fixtures that are created by this function will run a Localstack
container and patch botocore to direct traffic there for the duration
of the tests.
Since boto3 uses botocore to send requests, boto3 will also be redirected.
Args:
scope (str, optional): The pytest scope which this fixture will use.
Defaults to :const:`"function"`.
services (list, dict, optional): One of
- A :class:`list` of AWS service names to start in the
Localstack container.
- A :class:`dict` of service names to the port they should run on.
Defaults to all services. Setting this
can reduce container startup time and therefore test time.
autouse (bool, optional): If :obj:`True`, automatically use this
fixture in applicable tests. Default: :obj:`False`
docker_client (:class:`~docker.client.DockerClient`, optional):
Docker client to run the Localstack container with.
Defaults to :func:`docker.client.from_env`.
region_name (str, optional): Region name to assume.
Each Localstack container acts like a single AWS region.
Defaults to :const:`"us-east-1"`.
kinesis_error_probability (float, optional): Decimal value between
0.0 (default) and 1.0 to randomly inject
ProvisionedThroughputExceededException errors
into Kinesis API responses.
dynamodb_error_probability (float, optional): Decimal value
between 0.0 (default) and 1.0 to randomly inject
ProvisionedThroughputExceededException errors into
DynamoDB API responses.
container_log_level (int, optional): The logging level to use
for Localstack container logs. Defaults to :data:`logging.DEBUG`.
localstack_verison (str, optional): The version of the Localstack
image to use. Defaults to :const:`"latest"`.
auto_remove (bool, optional): If :obj:`True`, delete the Localstack
container when it stops. Default: :obj:`True`
pull_image (bool, optional): If :obj:`True`, pull the Localstack
image before running it. Default: :obj:`True`
container_name (str, optional): The name for the Localstack
container. Defaults to a randomly generated id.
**kwargs: Additional kwargs will be passed to the
:class:`.LocalstackSession`.
Returns:
A :func:`pytest fixture <_pytest.fixtures.fixture>`.
"""
@pytest.fixture(scope=scope, autouse=autouse)
def _fixture():
with _make_session(
docker_client=docker_client,
services=services,
region_name=region_name,
kinesis_error_probability=kinesis_error_probability,
dynamodb_error_probability=dynamodb_error_probability,
container_log_level=container_log_level,
localstack_verison=localstack_verison,
auto_remove=auto_remove,
pull_image=pull_image,
container_name=container_name,
**kwargs
) as session:
with session.botocore.patch_botocore():
yield session
return _fixture
|
304c3fbf99d943cb44e5b2967c984612fa5ca6dc
| 18,130 |
def euler_to_quat(e, order='zyx'):
"""
Converts from an euler representation to a quaternion representation
:param e: euler tensor
:param order: order of euler rotations
:return: quaternion tensor
"""
axis = {
'x': np.asarray([1, 0, 0], dtype=np.float32),
'y': np.asarray([0, 1, 0], dtype=np.float32),
'z': np.asarray([0, 0, 1], dtype=np.float32)}
q0 = angle_axis_to_quat(e[..., 0], axis[order[0]])
q1 = angle_axis_to_quat(e[..., 1], axis[order[1]])
q2 = angle_axis_to_quat(e[..., 2], axis[order[2]])
return quat_mul(q0, quat_mul(q1, q2))
|
ff5a848433d3cb9b878222b21fc79f06e42ea03f
| 18,131 |
def setnumber(update,context):
"""
Bot '/setnumber' command: starter of the conversation to set the emergency number
"""
update.message.reply_text('Please insert the number of a person you trust. It can be your life saver!')
return EMERGENCY
|
5faa4d9a9719d0b1f113f5912de728d24aee2814
| 18,132 |
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
"""Return the mean covariance matrix according using the AJD-based
log-Euclidean Mean (ALE). See [1].
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.4
References
----------
[1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
Diagonalization and Geometric Mean of Symmetric Positive Definite
Matrices', PLoS ONE, 2015
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
crit = np.inf
k = 0
# init with AJD
B, _ = ajd_pham(covmats)
while (crit > tol) and (k < maxiter):
k += 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
update = np.diag(np.diag(expm(J)))
B = np.dot(B, invsqrtm(update))
crit = distance_riemann(np.eye(n_channels), update)
A = np.linalg.inv(B)
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
C = np.dot(np.dot(A.T, expm(J)), A)
return C
|
0df7add370bda62e596abead471f3d393691f62c
| 18,133 |
def get_affiliate_code_from_qstring(request):
"""
Gets the affiliate code from the querystring if one exists
Args:
request (django.http.request.HttpRequest): A request
Returns:
Optional[str]: The affiliate code (or None)
"""
if request.method != "GET":
return None
affiliate_code = request.GET.get(AFFILIATE_QS_PARAM)
return affiliate_code
|
173b8f7ed3d202e0427d45609fcb8e9332cde15b
| 18,134 |
def get_gifti_labels(gifti):
"""Returns labels from gifti object (*.label.gii)
Args:
gifti (gifti image):
Nibabel Gifti image
Returns:
labels (list):
labels from gifti object
"""
# labels = img.labeltable.get_labels_as_dict().values()
label_dict = gifti.labeltable.get_labels_as_dict()
labels = list(label_dict.values())
return labels
|
3a4915ed50132a022e29cfed4e90905d05209484
| 18,135 |
def get_temporal_info(data):
"""Generates the temporal information related
power consumption
:param data: a list of temporal information
:type data: list(DatetimeIndex)
:return: Temporal contextual information of the energy data
:rtype: np.array
"""
out_info =[]
for d in data:
seconds = (d - d.iloc[0]).dt.total_seconds().values / np.max((d - d.iloc[0]).dt.total_seconds().values)
minutes = d.dt.minute.values / 60
hod = d.dt.hour.values / 24
dow = d.dt.dayofweek.values / 7
out_info.append([seconds, minutes, hod, dow])
return np.transpose(np.array(out_info)).reshape((-1,4))
|
308640fec7545409bfad0ec55cd1cc8c941434d2
| 18,136 |
def html_url(url: str, name: str = None, theme: str = "") -> str:
"""Create a HTML string for the URL and return it.
:param url: URL to set
:param name: Name of the URL, if None, use same as URL.
:param theme: "dark" or other theme.
:return: String with the correct formatting for URL
"""
if theme == "dark":
color = "#988fd4"
else:
color = "#1501a3"
if name is None:
name = url
retval = f'<a href="{url}" style="color:{color}">{name}</a>'
return retval
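# Usage sketch: the default (non-dark) theme uses the dark-blue link colour.
print(html_url("https://example.com", name="Example"))
# <a href="https://example.com" style="color:#1501a3">Example</a>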
|
74a0d3eabce4f0a53e699567e25c9d09924e3150
| 18,137 |
def get_logger_messages(loggers=[], after=0):
""" Returns messages for the specified loggers.
    If given, limits the messages to those that occurred after the given timestamp"""
if not isinstance(loggers, list):
loggers = [loggers]
return logger.get_logs(loggers, after)
|
d2c8ef6dc8f1ec0f4a5f7a1263b829f20e0dfa8b
| 18,138 |
def __dir__():
"""IPython tab completion seems to respect this."""
return __all__
|
d1b0fe35370412a6c0ca5d323417e4e3d1b3b603
| 18,139 |
def run_iterations(histogram_for_random_words,
histogram_for_text,
iterations):
"""Helper function for test_stochastic_sample (below).
Store the results of running the stochastic_sample function for 10,000
iterations in a histogram.
Param: histogram_for_random_words(dict): all values sum to a total of 0
histogram_for_text(dict): all values represent frequency in text
iterations(int): number of trials to run for stochastic_sample
Return: histogram_for_random_words(dict): sum of all values = 10,000
"""
unique_words = words_in_text(histogram_for_random_words)
for i in range(iterations):
word = stochastic_sample(histogram_for_text)
for key_word in unique_words:
if word == key_word:
histogram_for_random_words[word] += 1
return histogram_for_random_words
|
59bd4cefd03403eee241479df19f011915419f14
| 18,140 |
def GetFootSensors():
"""Get the foot sensor values"""
# Get The Left Foot Force Sensor Values
LFsrFL = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/FrontLeft/Sensor/Value")
LFsrFR = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/FrontRight/Sensor/Value")
LFsrBL = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/RearLeft/Sensor/Value")
LFsrBR = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/RearRight/Sensor/Value")
LF=[LFsrFL, LFsrFR, LFsrBL, LFsrBR]
# Get The Right Foot Force Sensor Values
RFsrFL = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/FrontLeft/Sensor/Value")
RFsrFR = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/FrontRight/Sensor/Value")
RFsrBL = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/RearLeft/Sensor/Value")
RFsrBR = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/RearRight/Sensor/Value")
RF=[RFsrFL, RFsrFR, RFsrBL, RFsrBR]
return LF, RF
|
555c5cb1f6e68571848410096144a3184d22e28a
| 18,141 |
def norm(x, y):
"""
    Calculate the Euclidean distance between two tensors.
    :param x: input tensor
    :param y: input tensor of the same shape as x
    :return: the Euclidean (L2) distance between x and y
"""
return tf.sqrt(tf.reduce_sum((x - y) ** 2))
|
67766f9e3c3a510a87eff6bdea7ddf9ec2504af3
| 18,142 |
def expand_key(keylist, value):
"""
Recursive method for converting into a nested dict
Splits keys containing '.', and converts into a nested dict
"""
if len(keylist) == 0:
return expand_value(value)
elif len(keylist) == 1:
key = '.'.join(keylist)
base = dict()
base[key] = expand_value(value)
return base
else:
key = keylist[0]
value = expand_key(keylist[1:], value)
base = dict()
base[key] = expand_value(value)
return base
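# Hedged usage sketch: `expand_value` is not shown in this snippet; an identity
# function is assumed here so the nesting behaviour is visible.
def expand_value(value):
    return value

print(expand_key("a.b.c".split("."), 1))  # {'a': {'b': {'c': 1}}}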
|
ac8b4bac9b686396d5d117149fb45b8bde2ac238
| 18,143 |
def _linear_sum_assignment(a, b):
"""
Given 1D arrays a and b, return the indices which specify the permutation of
b for which the element-wise distance between the two arrays is minimized.
Args:
a (array_like): 1D array.
b (array_like): 1D array.
Returns:
array_like: Indices which specify the desired permutation of b.
"""
# This is the first guess for a solution but sometimes we get duplicate
# indices so for those values we need to choose the 2nd or 3rd best
# solution. This approach can fail if there are too many elements in b which
    # map to the same element of a, but it's good enough for our purposes. For a
# general solution see the Hungarian algorithm/optimal transport algorithms.
idcs_initial = jnp.argsort(jnp.abs(b - a[:, None]), axis=1)
idcs_final = jnp.repeat(999, len(a))
def f(carry, idcs_initial_row):
i, idcs_final = carry
cond1 = jnp.isin(idcs_initial_row[0], jnp.array(idcs_final))
cond2 = jnp.isin(idcs_initial_row[1], jnp.array(idcs_final))
idx_closest = jnp.where(
cond1, jnp.where(cond2, idcs_initial_row[2], idcs_initial_row[1]), idcs_initial_row[0]
)
idcs_final = idcs_final.at[i].set(idx_closest)
return (i + 1, idcs_final), idx_closest
_, res = lax.scan(f, (0, idcs_final), idcs_initial)
return res
|
eeecff894e8bf29de66fa2560b8fdadbf3970d6d
| 18,144 |
def get_ingredients_for_slice_at_pos(pos, frame, pizza, constraints):
"""
Get the slice of pizza with its ingredients
:param pos:
:param frame:
:param pizza:
:param constraints:
:return:
"""
def _get_ingredients_for_slice_at_pos(_pos, _frame, _pizza, _max_rows, _max_cols):
if not is_valid_pos_for_frame(_pos, _frame, constraints):
return False
cur_slice = list()
for r in range(_frame['r']):
cur_slice.append(_pizza[_pos['r'] + r][_pos['c']:_pos['c'] + _frame['c']])
return cur_slice
return _get_ingredients_for_slice_at_pos(pos, frame, pizza, constraints["R"], constraints["C"])
|
db1083695d6f9503b3005e57db47c15ac761a31d
| 18,145 |
def merge_data_includes(tweets_data, tweets_include):
"""
Merges tweet object with other objects, i.e. media, places, users etc
"""
df_tweets_tmp = pd.DataFrame(tweets_data)
# Add key-values of a nested dictionary in df_tweets_tmp as new columns
df_tweets = flat_dict(df_tweets_tmp)
for incl in tweets_include:
df_incl = pd.DataFrame(tweets_include[incl])
if incl == 'media':
# Split each row to multiple rows for each item in media_keys list
df_tweets = df_tweets.explode('media_keys')
df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['media_keys'], right_on=['media_key'],
suffixes=[None,'_media'])
if incl == 'places':
df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['place_id'], right_on=['id'],
suffixes=[None,'_places'])
if incl == 'users':
df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['author_id'], right_on=['id'],
suffixes=[None,'_users'])
return df_tweets
|
db8e8560bdb80bd4a57d4f0d69031e944511633f
| 18,146 |
def stringify(context, mapping, thing):
"""Turn values into bytes by converting into text and concatenating them"""
if isinstance(thing, bytes):
return thing # retain localstr to be round-tripped
return b''.join(flatten(context, mapping, thing))
|
c4c4503160cab3ff6a78e2fb724fd283011ce0e7
| 18,147 |
import logging
import json
def extract_from_json(json_str, verbose=False):
"""A helper function to extract data from KPTimes dataset in json format
:param: json_str: the json string
:param: verbose: bool, if logging the process of data processing
:returns: the articles and keywords for each article
:rtype: src (list of string), tgt (list of keyword list)
"""
src = []
tgt = []
for idx in range(len(json_str)):
if idx % 1000 == 0:
if verbose:
                logging.info('processing idx: %s', idx)
data = json.loads(json_str[idx])
article = data['abstract']
keyword = data['keyword']
keyword = keyword.split(';')
src.append(article)
tgt.append(keyword)
return src, tgt
|
b05120eee45a887cee5eac68febffe96fcf8d305
| 18,148 |
def split_data(data, split_ratio, data_type=DATA_TYPE_1):
"""
split data by type
"""
data_type_1 = data[data['LABEL'] == data_type]
data_type_2 = data[data['LABEL'] != data_type]
train_set = data.sample(frac=split_ratio, replace=False)
test_set = data[~data.index.isin(train_set.index)]
train_set_type_1 = data_type_1.sample(frac=split_ratio, replace=False)
test_set_type_1 = data_type_1[~data_type_1.index.isin(train_set_type_1.index)]
train_set_type_2 = data_type_2.sample(frac=split_ratio, replace=False)
test_set_type_2 = data_type_2[~data_type_2.index.isin(train_set_type_2.index)]
return train_set, test_set, train_set_type_1, test_set_type_1, train_set_type_2, test_set_type_2
|
2653ea65bbc6fa2c7c0db9ab29890f57d5254d3f
| 18,149 |
from typing import Set
import os
def _get_mtimes(arg: str) -> Set[float]:
"""
Get the modification times of any converted notebooks.
Parameters
----------
arg
Notebook to run 3rd party tool on.
Returns
-------
Set
Modification times of any converted notebooks.
"""
return {os.path.getmtime(arg)}
|
c19e7ba43f6fb1d776f10e39f9fa46d05c947c72
| 18,150 |
def sparse_amplitude_prox(a_model, indices_target, counts_target, frame_dimensions, eps=0.5, lam=6e-1):
"""
Smooth truncated amplitude loss from Chang et al., Overlapping Domain Decomposition Methods for Ptychographic Imaging, (2020)
:param a_model: K x M1 x M2
:param indices_target: K x num_max_counts
:param counts_target: K x num_max_counts
:param frame_dimensions: 2
:return: loss (K,), grad (K x M1 x M2)
"""
threadsperblock = (256,)
    blockspergrid = tuple(np.ceil(np.array(np.prod(a_model.shape)) / threadsperblock).astype(int))
loss = th.zeros((a_model.shape[0],), device=a_model.device, dtype=th.float32)
grad = th.ones_like(a_model)
no_count_indicator = th.iinfo(indices_target.dtype).max
sparse_amplitude_prox_kernel[blockspergrid, threadsperblock](a_model.detach(), indices_target.detach(),
counts_target.detach(), loss.detach(), grad.detach(),
no_count_indicator, eps, lam)
return loss, grad
|
9a2b7c0deb2eba58cebd6f7b2198c659c1915711
| 18,151 |
from typing import Dict
from typing import Any
from typing import List
def schema_as_fieldlist(content_schema: Dict[str, Any], path: str = "") -> List[Any]:
"""Return a list of OpenAPI schema property descriptions."""
fields = []
if "properties" in content_schema:
required_fields = content_schema.get("required", ())
for prop, options in content_schema["properties"].items():
new_path = path + "." + prop if path else prop
required = (
options["required"]
if "required" in options
else prop in required_fields
)
if "type" not in options:
fields.append(FieldDescription.load(options, new_path, required))
elif options["type"] == "object":
fields.append(FieldDescription.load(options, new_path, required))
fields.extend(schema_as_fieldlist(options, path=new_path))
elif options["type"] == "array":
fields.append(FieldDescription.load(options, new_path, required))
fields.extend(
schema_as_fieldlist(options["items"], path=new_path + ".[]")
)
else:
fields.append(FieldDescription.load(options, new_path, required))
if "items" in content_schema:
new_path = path + "." + "[]" if path else "[]"
content_schema["type"] = "array of {}s".format(
deduce_type(content_schema["items"])
)
fields.append(FieldDescription.load(content_schema, new_path))
fields.extend(schema_as_fieldlist(content_schema["items"], path=new_path))
return fields
|
b691e74ac36a0f3904bd317acee9b9344a440cdb
| 18,152 |
def shrink(filename):
"""
:param filename: str, the location of the picture
:return: img, the shrink picture
"""
img = SimpleImage(filename)
new_img = SimpleImage.blank((img.width+1) // 2, (img.height+1) // 2)
for x in range(0, img.width, 2):
for y in range(0, img.height, 2):
pixel = img.get_pixel(x, y)
new_pixel = new_img.get_pixel(x//2, y//2)
"""
For every pixel(x, y) in img, assigns the average RGB of pixel(x, y), pixel(x+1, y),
pixel(x, y+1) and pixel(x+1, y+1) to new_pixel(x//2, y//2) in new_img.
"""
if ((img.width+1) % 2 == 0 and x == img.width - 1) or ((img.height + 1) % 2 == 0 and y == img.height - 1):
# It's the end of img.width or img.height.
new_pixel.red = pixel.red
new_pixel.green = pixel.green
new_pixel.blue = pixel.blue
else:
pixel1 = img.get_pixel(x+1, y)
pixel2 = img.get_pixel(x, y+1)
                pixel3 = img.get_pixel(x+1, y+1)
new_pixel.red = (pixel.red + pixel1.red + pixel2.red + pixel3.red) // 4
new_pixel.green = (pixel.green + pixel1.green + pixel2.green + pixel3.green) // 4
new_pixel.blue = (pixel.blue + pixel1.blue + pixel2.blue + pixel3.blue) // 4
return new_img
|
fad3778089b0d5f179f62fb2a40ec80fd3fe37d1
| 18,153 |
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
"""
    Check whether the string is shorter than the given number of characters.
    @param palavra: the word to be checked
    @param quantidade: the number of characters to check against
    @return: True if the word is shorter than the given number of characters, False otherwise
"""
tamanho = len(palavra)
eh_menor = False
if tamanho < quantidade:
eh_menor = True
return eh_menor
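# Usage sketch: "python" has 6 characters, so it is shorter than 10 but not than 5.
print(eh_menor_que_essa_quantidade_de_caracters("python", 10))  # True
print(eh_menor_que_essa_quantidade_de_caracters("python", 5))   # False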
|
827469606b0b93b78b63686465decbbbc63b9673
| 18,154 |
import rasterstats as rs
def buffer_sampler(ds,geom,buffer,val='median',ret_gdf=False):
"""
sample values from raster at the given ICESat-2 points
using a buffer distance, and return median/mean or a full gdf ( if return gdf=True)
Inputs = rasterio dataset, Geodataframe containing points, buffer distance, output value = median/mean (default median)
and output format list of x_atc,output_value arrays (default) or full gdf
"""
ndv = get_ndv(ds)
array = ds.read(1)
gt = ds.transform
stat = val
geom = geom.to_crs(ds.crs)
x_min,y_min,x_max,y_max = ds.bounds
geom = geom.cx[x_min:x_max, y_min:y_max]
geom['geometry'] = geom.geometry.buffer(buffer)
json_stats = rs.zonal_stats(geom,array,affine=gt,geojson_out=True,stats=stat,nodata=ndv)
gdf = gpd.GeoDataFrame.from_features(json_stats)
if val =='median':
gdf = gdf.rename(columns={'median':'med'})
call = 'med'
else:
gdf = gdf.rename(columns={'mean':'avg'})
call = 'avg'
if ret_gdf:
out_file = gdf
else:
out_file = [gdf.x_atc.values,gdf[call].values]
return out_file
|
8efde64c0ee49b11e484fd204cf70ae5ae322bf9
| 18,155 |
import re
def extract_int(str, start, end):
""" Returns the integer between start and end. """
val = extract_string(str, start, end)
    if val is not None and re.match(r'^[0-9]+$', val):
return int(val)
return None
|
ec08c15592ea7e7ab9e4a0f476a97ba2127dda85
| 18,156 |
import re
def get_pg_ann(diff, vol_num):
"""Extract pedurma page and put page annotation.
Args:
diff (str): diff text
vol_num (int): volume number
Returns:
str: page annotation
"""
pg_no_pattern = fr"{vol_num}\S*?(\d+)"
pg_pat = re.search(pg_no_pattern, diff)
try:
pg_num = pg_pat.group(1)
except Exception:
pg_num = 0
return f"<p{vol_num}-{pg_num}>"
|
d9ca1a760f411352d8bcbe094ac622f7dbd33d07
| 18,157 |
def check_diamond(structure):
"""
    Utility function to check whether the structure is diamond (rather than fcc, bcc or hcp)
Args:
structure (pyiron_atomistics.structure.atoms.Atoms): Atomistic Structure object to check
Returns:
bool: true if diamond else false
"""
cna_dict = structure.analyse.pyscal_cna_adaptive(
mode="total", ovito_compatibility=True
)
dia_dict = structure.analyse.pyscal_diamond_structure(
mode="total", ovito_compatibility=True
)
return (
cna_dict["CommonNeighborAnalysis.counts.OTHER"]
> dia_dict["IdentifyDiamond.counts.OTHER"]
)
|
ae082d6921757163cce3ddccbca15bf70621a092
| 18,158 |
from typing import Optional
from typing import Union
from typing import Dict
from typing import Any
from typing import List
from typing import Tuple
def compute_correlation(
df: DataFrame,
x: Optional[str] = None,
y: Optional[str] = None,
*,
cfg: Union[Config, Dict[str, Any], None] = None,
display: Optional[List[str]] = None,
value_range: Optional[Tuple[float, float]] = None,
k: Optional[int] = None,
) -> Intermediate:
# pylint: disable=too-many-arguments
"""
Parameters
----------
df
The pandas dataframe for which plots are calculated for each column.
cfg
Config instance
x
A valid column name of the dataframe
y
A valid column name of the dataframe
value_range
If the correlation value is out of the range, don't show it.
cfg: Union[Config, Dict[str, Any], None], default None
When a user call plot_correlation(), the created Config object will be passed to
compute_correlation().
When a user call compute_correlation() directly, if he/she wants to customize the output,
cfg is a dictionary for configuring. If not, cfg is None and
default values will be used for parameters.
display: Optional[List[str]], default None
A list containing the names of the visualizations to display. Only exist when
a user call compute_correlation() directly and want to customize the output
k
Choose top-k element
"""
if isinstance(cfg, dict):
cfg = Config.from_dict(display, cfg)
elif not cfg:
cfg = Config()
df = preprocess_dataframe(df)
if x is None and y is None: # pylint: disable=no-else-return
with catch_warnings():
filterwarnings(
"ignore",
"overflow encountered in long_scalars",
category=RuntimeWarning,
)
return _calc_overview(df, cfg, value_range=value_range, k=k)
elif x is not None and y is None:
with catch_warnings():
filterwarnings(
"ignore",
"overflow encountered in long_scalars",
category=RuntimeWarning,
)
return _calc_univariate(df, x, cfg, value_range=value_range, k=k)
elif x is None and y is not None:
raise ValueError("Please give the column name to x instead of y")
elif x is not None and y is not None:
return _calc_bivariate(df, cfg, x, y, k=k)
raise ValueError("Not Possible")
|
a8fb7f4e6cf34d584aba8e8fa9a7a7703fad8bad
| 18,159 |
def radix_sort(arr):
"""Sort list of numberes with radix sort."""
if len(arr) > 1:
buckets = [[] for x in range(10)]
lst = arr
output = []
t = 0
m = len(str(max(arr)))
while m > t:
for num in lst:
if len(str(num)) >= t + 1:
for b_num in range(10):
idx = num // 10**t % 10
if idx == b_num:
buckets[b_num].append(num)
break
else:
output.append(num)
lst = []
for bucket in buckets:
lst += bucket
buckets = [[] for x in range(10)]
t += 1
output += lst
return output
else:
return arr
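# Usage sketch: radix sort on non-negative integers, least-significant digit first.
print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]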
|
517ab99483ac1c6cd18df11dc1dccb4c502cac39
| 18,160 |
import logging
import os
def du(path, *args, **kwargs):
"""
    Return the total size in bytes of the files under ``path``.
    :param str path: path
:rtype: int
:return:
"""
    # Initialize variables.
_debug = kwargs.get("debug")
logger = kwargs.get("logger")
if not logger:
logger = logging.getLogger(__file__)
byte = 0
for root, _dirs, files in os.walk(path):
for fname in files:
path = os.path.join(root, fname)
if os.path.isfile(path):
byte += os.path.getsize(path)
return byte
|
91b5927da5748e242c47eea2735c5ec3f00f2a5b
| 18,161 |
def resampling(w, rs):
"""
Stratified resampling with "nograd_primitive" to ensure autograd
takes no derivatives through it.
"""
N = w.shape[0]
bins = np.cumsum(w)
ind = np.arange(N)
u = (ind + rs.rand(N))/N
return np.digitize(u, bins)
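# Hedged usage sketch: `np` in this snippet is assumed to be (autograd-compatible)
# numpy; plain numpy behaves the same for this call.
import numpy as np

rs = np.random.RandomState(0)
w = np.array([0.1, 0.2, 0.3, 0.4])
print(resampling(w, rs))  # four indices in {0, 1, 2, 3}, biased toward high weights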
|
2f3d6ae173d5e0ebdfe36cd1ab6595af7452c191
| 18,162 |
import torch
def integrated_bn(fms, bn):
"""iBN (integrated Batch Normalization) layer of SEPC."""
sizes = [p.shape[2:] for p in fms]
n, c = fms[0].shape[0], fms[0].shape[1]
fm = torch.cat([p.view(n, c, 1, -1) for p in fms], dim=-1)
fm = bn(fm)
fm = torch.split(fm, [s[0] * s[1] for s in sizes], dim=-1)
return [p.view(n, c, s[0], s[1]) for p, s in zip(fm, sizes)]
|
bee6d8782b372c0fb3990eefa42d51c6acacc29b
| 18,163 |
def get_RF_calculations(model, criteria, calculation=None, clus="whole", too_large=None,
sgonly=False, regionalonly=False):
"""
    Break down the data needed for <calculation> for the given criteria and cluster,
    falling back to (or creating) the serialized pickle of the full dataset.
"""
print(f'{utils.time_now()} - Criteria: {criteria}, calculation: {calculation}, clus: {clus}, sgonly: {sgonly}, regionalonly: {regionalonly}')
# pickling the entire dataset which is what z-score will be calculated against
if sgonly: location_tag = '_sgonly'
elif regionalonly: location_tag = '_regionalonly'
else: location_tag = ''
found = utils.find(f"{criteria}_serialized_{clus}{location_tag}.pkl", model.cluster_dir)
if found: found = found[0]
else:
# note: why each model is pickled even as a whole or even in its cluster
# is that it relieves holding in-memory these arrays
# later, these pickles are simply opened lazily when needed
print(f'"{criteria}_serialized_{clus}{location_tag}.pkl" not found.')
found = acquire_rf_subset(model, criteria, clus, location_tag)
utils.to_pickle(f"{criteria}_serialized_{clus}{location_tag}", found, model.cluster_dir)
if type(found) == str:
pkl = utils.open_pickle(found)
else: pkl = found # for when cluster-wise, this is not a path but the actual numpy array
if calculation == "mean" and len(pkl.shape) >2:
daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
return daskarr.mean(axis=0).compute() *100
elif calculation == "std" and len(pkl.shape) >2:
daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
return daskarr.std(axis=0).compute() *100
elif calculation == "90perc" and len(pkl.shape) >2:
print('got back')
if too_large:
pkl = pkl.chunk({'time':-1, 'lon':2, 'lat':2})
return pkl.quantile(0.9, dim='time').persist().values
else:
return np.percentile(pkl.values, 90, axis=0)
elif calculation == "10perc" and len(pkl.shape) >2:
print('got back')
if too_large:
pkl = pkl.chunk({'time':-1, 'lon':2, 'lat':2})
return pkl.quantile(0.1, dim='time').persist().values
else:
return np.percentile(pkl.values, 10, axis=0)
# da.map_blocks(np.percentile, pkl, axis=0, q=q)
# daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
# print('yer')
# percentile_rank_lst = []
# for p in range(pkl.shape[1]):
# for k in range(pkl.shape[2]):
# pkl_ = pkl[:, p, k]
# percentile_rank_lst.append(np.percentile(pkl_, 90))
# daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
# return da.percentile(pkl, 90).compute()
# return np.array(percentile_rank_lst).reshape(pkl.shape[1], pkl.shape[2])
else:# e.g. rf_ds_lon has None as <calculation>
return pkl
|
34b44b3a525bd7cee562a63d689fc21d5a5c2a4a
| 18,164 |
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
# tf.io.decode_png function cannot be applied on a batch of data.
# We have to use tf.map_fn
image_features = tf.map_fn(
lambda x: tf.io.decode_png(x[0], channels=3),
inputs[_IMAGE_KEY],
dtype=tf.uint8)
# image_features = tf.cast(image_features, tf.float32)
image_features = tf.image.resize(image_features, [224, 224])
image_features = tf.keras.applications.mobilenet.preprocess_input(
image_features)
outputs[_transformed_name(_IMAGE_KEY)] = image_features
# TODO(b/157064428): Support label transformation for Keras.
# Do not apply label transformation as it will result in wrong evaluation.
outputs[_transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY]
return outputs
|
88d2cdc03a3b762ba21d41205ec37ad674ed20b1
| 18,165 |
from plugin.helpers import log_plugin_error
import importlib
import pkgutil
def get_modules(pkg, recursive: bool = False):
"""get all modules in a package"""
if not recursive:
return [importlib.import_module(name) for finder, name, ispkg in iter_namespace(pkg)]
context = {}
for loader, name, ispkg in pkgutil.walk_packages(pkg.__path__):
try:
module = loader.find_module(name).load_module(name)
pkg_names = getattr(module, '__all__', None)
for k, v in vars(module).items():
if not k.startswith('_') and (pkg_names is None or k in pkg_names):
context[k] = v
context[name] = module
except AppRegistryNotReady:
pass
except Exception as error:
# this 'protects' against malformed plugin modules by more or less silently failing
# log to stack
log_plugin_error({name: str(error)}, 'discovery')
return [v for k, v in context.items()]
|
96c48ae86a01defe054e5a4fc948c2f9cfb05660
| 18,166 |
def TransformOperationHttpStatus(r, undefined=''):
"""Returns the HTTP response code of an operation.
Args:
r: JSON-serializable object.
undefined: Returns this value if there is no response code.
Returns:
The HTTP response code of the operation in r.
"""
if resource_transform.GetKeyValue(r, 'status', None) == 'DONE':
return (resource_transform.GetKeyValue(r, 'httpErrorStatusCode', None) or
200) # httplib.OK
return undefined
|
e840575ccbe468e6b3bc9d5dfb725751bd1a1464
| 18,167 |
import warnings
def split_record_fields(items, content_field, itemwise=False):
"""
This functionality has been moved to :func:`split_records()`, and this is just
a temporary alias for that other function. You should use it instead of this.
"""
warnings.warn(
"`split_record_fields()` has been renamed `split_records()`, "
"and this function is just a temporary alias for it.",
DeprecationWarning,
)
    return split_records(items, content_field, itemwise=itemwise)
|
256efc34bced15c5694fac2a7c4c1003214a54c5
| 18,168 |
from typing import cast
from typing import Dict
import os
def flask_formats() -> Response:
"""Invoke formats() from flask.
Returns:
Flask HTTP response
"""
envs = cast(Dict[str, str], os.environ)
return _as_response(formats(envs, _as_request(flash_request), envs.get("FLASK_ENV", "prod")))
|
b672e512f13fef232409632d103c89726980aba8
| 18,169 |
import scipy
import numpy
def prony(signal):
"""Estimates amplitudes and phases of a sparse signal using Prony's method.
Single-ancilla quantum phase estimation returns a signal
g(k)=sum (aj*exp(i*k*phij)), where aj and phij are the amplitudes
and corresponding eigenvalues of the unitary whose phases we wish
to estimate. When more than one amplitude is involved, Prony's method
provides a simple estimation tool, which achieves near-Heisenberg-limited
scaling (error scaling as N^{-1/2}K^{-3/2}).
Args:
signal(1d complex array): the signal to fit
Returns:
amplitudes(list of complex values): the amplitudes a_i,
in descending order by their complex magnitude
phases(list of complex values): the complex frequencies gamma_i,
correlated with amplitudes.
"""
num_freqs = len(signal) // 2
hankel0 = scipy.linalg.hankel(c=signal[:num_freqs],
r=signal[num_freqs - 1:-1])
hankel1 = scipy.linalg.hankel(c=signal[1:num_freqs + 1],
r=signal[num_freqs:])
shift_matrix = scipy.linalg.lstsq(hankel0.T, hankel1.T)[0]
phases = numpy.linalg.eigvals(shift_matrix.T)
generation_matrix = numpy.array(
[[phase**k for phase in phases] for k in range(len(signal))])
amplitudes = scipy.linalg.lstsq(generation_matrix, signal)[0]
amplitudes, phases = zip(*sorted(
zip(amplitudes, phases), key=lambda x: numpy.abs(x[0]), reverse=True))
return numpy.array(amplitudes), numpy.array(phases)
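# Self-check sketch: build a signal from two known amplitude/phase pairs and confirm
# prony() recovers them (all values below are synthetic).
ks = numpy.arange(20)
gamma1, gamma2 = numpy.exp(0.4j), numpy.exp(1.9j)
test_signal = 0.7 * gamma1**ks + 0.3 * gamma2**ks
est_amps, est_phases = prony(test_signal)
# The two largest recovered amplitudes should be close to 0.7 and 0.3,
# with phases close to 0.4 and 1.9 radians.
print(numpy.round(numpy.abs(est_amps[:2]), 3), numpy.round(numpy.angle(est_phases[:2]), 3))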
|
50bbcd05b1e541144207762052de9de783089bad
| 18,170 |
import numpy as np
def _check_alignment(beh_events, alignment, candidates, candidates_set,
resync_i, check_i=None):
"""Check the alignment, account for misalignment accumulation."""
check_i = resync_i if check_i is None else check_i
beh_events = beh_events.copy() # don't modify original
events = np.zeros((beh_events.size))
start = np.argmin([abs(beh_e - candidates).min()
for beh_e in beh_events + alignment])
for i, beh_e in enumerate(beh_events[start:]):
error, events[start + i] = \
_event_dist(beh_e + alignment, candidates_set, candidates[-1],
check_i)
if abs(error) <= resync_i and start + i + 1 < beh_events.size:
beh_events[start + i + 1:] -= error
for i, beh_e in enumerate(beh_events[:start][::-1]):
error, events[start - i - 1] = \
_event_dist(beh_e + alignment, candidates_set, candidates[-1],
check_i)
if abs(error) <= resync_i and start - i - 2 > 0:
beh_events[:start - i - 2] -= error
return beh_events, events
|
e4508e90f11bb5b10619d19066a5fb51c36365b3
| 18,171 |
def user_info():
"""
    Display the user's basic profile in the personal center.
    1. Try to get the user info: user = g.user
    2. If the user is not logged in, redirect to the site home page
    3. If the user is logged in, fetch the user info
    4. Pass the user info to the template
:return:
"""
user = g.user
if not user:
return redirect('/')
data = {
'user': user.to_dict()
}
return render_template('blogs/user.html', data=data)
|
cb8d9c2081c8a26a82a451ce0f4de22fc1a43845
| 18,172 |
def build_config_tests_list():
"""Build config tests list"""
    names, _, _, _ = zip(*config_tests)
return names
|
df190ec4926af461f15145bc25314a397d0be52b
| 18,173 |
import sys
import re
def get_layer_type_from_name(name=None) :
"""
get the layer type from the long form layer name
"""
if name is None :
print("Error get_layer_type_from_name - Bad args - exiting...")
sys.exit(1)
#print(name)
phase, layer_name = name.split(' ')
layer_type = layer_name
    ## @@@ For new NVTX - make the convention 'Phase LayerType,UniqueLayerName'
pattern = re.compile(r"([a-zA-Z0-9]+),(\S+)")
res = re.match(pattern, layer_name)
if res is not None:
layer_type = "{}".format(res.group(1))
layer_name = "{}".format(res.group(2))
return layer_type, phase, layer_name
'''
## @@@ For Deep Bench - Remove this - make Deep Bench follow 'Phase Type,UniqueName' pattern
pattern = re.compile(r"(Conv_\d+x\d+)")
res = re.match(pattern, layer_name)
if res is not None:
layer_type = "{}".format(res.group(1))
return layer_type, phase, layer_name
'''
### All remaining pattern matches are there to support KNF naming convention
pattern = re.compile(r"layer_\d+_\d+_(\w+)")
res = re.match(pattern, layer_name)
if res is not None:
layer_type = "{}".format(res.group(1))
return layer_type, phase, layer_name
## Look for res_branch_relu tag
#pattern = re.compile(r"res\w+_branch\w+_(relu)")
pattern = re.compile(r"res\w+[_]+(relu)")
res = re.match(pattern, layer_name)
if res is not None:
layer_type = "{}".format(res.group(1))
return layer_type, phase, layer_name
## Look for res_branch tag
pattern = re.compile(r"res\w+_branch\w+")
res = re.match(pattern, layer_type)
if res is not None:
layer_type = "conv"
return layer_type, phase, layer_name
## Look for bn_branch tag
pattern = re.compile(r"(bn)\w+_branch\w+")
res = re.match(pattern, layer_type)
if res is not None:
layer_type = "{}".format(res.group(1))
return layer_type, phase, layer_name
pattern = re.compile(r"res\d+[a-f]")
res = re.match(pattern, layer_type)
if res is not None:
if Debug:
print ("Found elt layer type from {}".format(layer_type))
layer_type = "elt"
return layer_type, phase, layer_name
# Get rid of numbers
layer_type = re.sub(r"\d+", "", layer_type)
# Special case - conv_expand - is a conv layer
pattern = re.compile(r"(\w+)_expand")
res = re.match(pattern, layer_type)
if res is not None:
layer_type = "{}".format(res.group(1))
return layer_type, phase, layer_name
## Look for bn_conv - V1 prototxt format has bn as first field V2 has it as 2nd field
pattern = re.compile(r"bn_(conv)")
res = re.match(pattern, layer_type)
if res is not None:
layer_type = "bn"
return layer_type, phase, layer_name
## Look for compound layer names - use the 2nd field for the name
layer_type = re.sub(r".*_(\w+)", "\g<1>", layer_type)
return layer_type, phase, layer_name
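# Illustrative calls using the 'Phase LayerType,UniqueLayerName' convention noted above
# (the layer names are made up).
print(get_layer_type_from_name("fprop conv,res2a_branch2a"))  # ('conv', 'fprop', 'res2a_branch2a')
print(get_layer_type_from_name("bprop bn,bn2a_branch2a"))     # ('bn', 'bprop', 'bn2a_branch2a')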
|
8baf34c563c1de64ad6e7f8a6f5ee8f84be5a36a
| 18,174 |
def annotate_filter(**decargs):
"""Add input and output watermarks to filtered events."""
def decorator(func):
"""Annotate events with entry and/or exit timestamps."""
def wrapper(event, *args, **kwargs):
"""Add enter and exit annotations to the processed event."""
funcname = ":".join([func.__module__, func.__name__])
enter_key = funcname + "|enter"
annotate_event(event, enter_key, **decargs)
out = func(event, *args, **kwargs)
exit_key = funcname + "|exit"
annotate_event(event, exit_key, **decargs)
return out
return wrapper
return decorator
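# Usage sketch: decorate an event filter so its entry/exit watermarks get recorded.
# annotate_event() must be provided by the surrounding package and the keyword
# argument shown here is purely hypothetical.
@annotate_filter(namespace="example")
def drop_debug_events(event):
    """Drop debug-level events, pass everything else through."""
    return None if event.get("level") == "debug" else event
# drop_debug_events({"level": "info", "msg": "hello"}) would now be annotated on entry and exit.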
|
e1ce16e46f17948bdb1eae3ac8e5884fe6553283
| 18,175 |
from matplotlib.pyplot import gcf
def cplot(*args, **kwargs):
    """
    cplot - Plot on the current graph
    This is an "alias" for gcf().gca().plot()
    """
    return gcf().gca().plot(*args, **kwargs)
|
b7725569d19520c0e85f3a48d30800c3822cdac2
| 18,176 |
import datetime
def need_to_flush_metrics(time_now):
"""Check if metrics need flushing, and update the timestamp of last flush.
Even though the caller of this function may not successfully flush the
metrics, we still update the last_flushed timestamp to prevent too much work
being done in user requests.
Also, this check-and-update has to happen atomically, to ensure only one
thread can flush metrics at a time.
"""
if not interface.state.flush_enabled_fn():
return False
datetime_now = datetime.datetime.utcfromtimestamp(time_now)
minute_ago = datetime_now - datetime.timedelta(seconds=60)
with _flush_metrics_lock:
if interface.state.last_flushed > minute_ago:
return False
interface.state.last_flushed = datetime_now
return True
|
a2f50927a61eecee9448661f87f08a99caa4a22c
| 18,177 |
import os
def path_to_url(path):
"""Return the URL corresponding to a given path."""
if os.sep == '/':
return path
else:
return '/'.join(split_all(path))
|
da8044b026ec1130d5d5eaa8a083a3da56bae0e0
| 18,178 |
def create_instances_from_lists(x, y=None, name="data"):
"""
Allows the generation of an Instances object from a list of lists for X and a list for Y (optional).
All data must be numerical. Attributes can be converted to nominal with the
weka.filters.unsupervised.attribute.NumericToNominal filter.
:param x: the input variables (row wise)
:type x: list of list
:param y: the output variable (optional)
:type y: list
:param name: the name of the dataset
:type name: str
:return: the generated dataset
:rtype: Instances
"""
if y is not None:
if len(x) != len(y):
raise Exception("Dimensions of x and y differ: " + str(len(x)) + " != " + str(len(y)))
# create header
atts = []
    for i in range(len(x[0])):
atts.append(Attribute.create_numeric("x" + str(i+1)))
if y is not None:
atts.append(Attribute.create_numeric("y"))
result = Instances.create_instances(name, atts, len(x))
# add data
    for i in range(len(x)):
values = x[i][:]
if y is not None:
values.append(y[i])
result.add_instance(Instance.create_instance(values))
return result
|
310d72cb9fe5f65d85b19f9408e670426ebf7fdd
| 18,179 |
def jitdevice(func, link=[], debug=None, inline=False):
"""Wrapper for device-jit.
"""
debug = config.CUDA_DEBUGINFO_DEFAULT if debug is None else debug
if link:
raise ValueError("link keyword invalid for device function")
return compile_device_template(func, debug=debug, inline=inline)
|
363b48390cfbef9954d714cf3c7900d26693a09c
| 18,180 |
import numpy as np
from scipy.ndimage import median_filter
def median_filter_(img, mask):
"""
Applies a median filer to all channels
"""
ims = []
for d in range(3):
img_conv_d = median_filter(img[:,:,d], size=(mask,mask))
ims.append(img_conv_d)
return np.stack(ims, axis=2).astype("uint8")
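# Quick usage sketch on a random RGB image (the 3x3 mask size is arbitrary).
rng = np.random.default_rng(0)
noisy = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
smoothed = median_filter_(noisy, mask=3)
print(smoothed.shape, smoothed.dtype)  # (64, 64, 3) uint8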
|
2d7909b974572711901f84806009f237ecafaadf
| 18,181 |
def replace_lines(inst, clean_lines, norm_lines):
"""
Given an instance and a list of clean lines and normal lines,
add a cleaned tier and normalized if they do not already exist,
otherwise, replace them.
:param inst:
:type inst: xigt.Igt
:param clean_lines:
:type clean_lines: list[dict]
:param norm_lines:
:type norm_lines: list[dict]
"""
# -------------------------------------------
# Remove the old clean/norm lines.
# -------------------------------------------
old_clean_tier = cleaned_tier(inst)
if old_clean_tier is not None:
inst.remove(old_clean_tier)
old_norm_tier = normalized_tier(inst)
if old_norm_tier is not None:
inst.remove(old_norm_tier)
# -------------------------------------------
# Now, add the clean/norm lines, if provided.
# -------------------------------------------
if clean_lines:
new_clean_tier = create_text_tier_from_lines(inst, clean_lines, CLEAN_ID, CLEAN_STATE)
inst.append(new_clean_tier)
if norm_lines:
new_norm_tier = create_text_tier_from_lines(inst, norm_lines, NORM_ID, NORM_STATE)
inst.append(new_norm_tier)
return inst
|
39f3fdcd40eafd32e071b54c9ab032104fba8c7c
| 18,182 |
from pathlib import Path
def get_html(link: Link, path: Path) -> str:
"""
Try to find wget, singlefile and then dom files.
If none is found, download the url again.
"""
canonical = link.canonical_outputs()
abs_path = path.absolute()
sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]]
document = None
for source in sources:
try:
with open(abs_path / source, "r", encoding="utf-8") as f:
document = f.read()
break
except (FileNotFoundError, TypeError):
continue
if document is None:
return download_url(link.url)
else:
return document
|
3624e3df219cc7d6480747407ad7de3ec702813e
| 18,183 |
import numpy as np
def normalization(X, degree):
""" A scaling technique in which values
are shifted and rescaled so that they
end up ranging between 0 and 1.
It is also known as Min-Max scaling
----------------------------------------
degree: polynomial regression degree, or attribute/feature number
"""
X[:, :(degree)] = (X[:, :(degree)] - np.amin(X[:, :(degree)], axis = 0))/ \
(np.amax(X[:, :(degree)], axis = 0) - np.amin(X[:, :(degree)], axis = 0))
return X
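# Example: min-max scale the first two attribute columns of a small matrix
# (degree=2 means two attributes; the last column is left untouched).
X_demo = np.array([[1.0, 10.0, 7.0],
                   [2.0, 30.0, 8.0],
                   [4.0, 50.0, 9.0]])
print(normalization(X_demo.copy(), degree=2))
# The first two columns now lie in [0, 1]; note the function also modifies its input in place.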
|
9cdef8b4b7e7a31523311ce6f4a668c6039ad2a1
| 18,184 |
def get_tags_from_match(child_span_0, child_span_1, tags):
"""
Given two entities spans,
check if both are within one of the tags span,
and return the first match or O
"""
match_tags = []
for k, v in tags.items():
parent_span = (v["start"], v["end"])
if parent_relation(child_span_0, child_span_1, parent_span):
match_tags.append(v["tag"])
return match_tags[0] if match_tags else "O"
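# Illustrative call, assuming parent_relation() is true when both child spans fall
# inside the parent span; the tags dict follows the {start, end, tag} shape used above.
example_tags = {
    "t1": {"start": 0, "end": 50, "tag": "CONTRACT"},
    "t2": {"start": 60, "end": 90, "tag": "ANNEX"},
}
print(get_tags_from_match((5, 10), (20, 30), example_tags))  # 'CONTRACT'
print(get_tags_from_match((5, 10), (70, 80), example_tags))  # 'O'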
|
c7ad037d2c40b6316006b4c7dda2fd9d02640f6e
| 18,185 |
def _rfc822_escape(header):
"""Return a version of the string escaped for inclusion in an
RFC-822 header, by ensuring there are 8 spaces space after each newline.
"""
lines = header.split('\n')
header = ('\n' + 8 * ' ').join(lines)
return header
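# Example: a multi-line value folded for an RFC-822 style header.
print(_rfc822_escape("First line\nSecond line\nThird line"))
# First line
#         Second line
#         Third line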
|
1a3cd02b057742db00ed741c40947cf4e19d1a86
| 18,186 |
import socket
import sciflo.utils
def getCgiBaseHref():
"""Return value for <cgiBaseHref/> configuration parameter."""
val = sciflo.utils.ScifloConfigParser().getParameter('cgiBaseHref')
if val is None:
val = "http://%s/sciflo/cgi-bin/" % socket.getfqdn()
return val
|
62b5bc3d528c6db64ff8899c2847d2b0ecb4021d
| 18,187 |
import copy
import os
def _add_remote_resources(resources):
"""Retrieve remote resources like GATK/MuTect jars present in S3.
"""
out = copy.deepcopy(resources)
for prog, info in resources.items():
for key, val in info.items():
if key == "jar" and objectstore.is_remote(val):
store_dir = utils.safe_makedir(os.path.join(os.getcwd(), "inputs", "jars", prog))
fname = objectstore.download(val, store_dir, store_dir)
version_file = os.path.join(store_dir, "version.txt")
if not utils.file_exists(version_file):
version = install.get_gatk_jar_version(prog, fname)
with open(version_file, "w") as out_handle:
out_handle.write(version)
else:
with open(version_file) as in_handle:
version = in_handle.read().strip()
del out[prog][key]
out[prog]["dir"] = store_dir
out[prog]["version"] = version
return out
|
faac0cb96ed7cbe712d67e2d095dfb1fcbba8f99
| 18,188 |
import pygame
from queue import PriorityQueue
def dijkstra(gph: GraphState,
algo: AlgoState,
txt: VisText,
start: Square,
end: Square,
ignore_node: Square = None,
draw_best_path: bool = True,
visualize: bool = True) \
-> [dict, bool]:
"""Code for the dijkstra algorithm"""
    # Used to determine the order of squares to check. Order of args helps decide the priority.
queue_pos: int = 0
open_set = PriorityQueue()
open_set.put((0, queue_pos, start))
open_set_hash: set = {start}
# Determine what is the best square to check
g_score: dict = {square: float('inf') for row in gph.graph for square in row}
g_score[start] = 0
# Keeps track of next node for every node in graph. A linked list basically.
came_from: dict = {}
# Continues until every node has been checked or best path found
i = 0
while not open_set.empty():
        # If the user closes the window, the program terminates
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# Gets the square currently being checked
curr_square: Square = open_set.get()[2]
open_set_hash.remove(curr_square)
# Terminates if found the best path
if curr_square == end:
if draw_best_path:
best_path(gph, algo, txt, came_from, end, visualize=visualize)
return True
return came_from
# Decides the order of neighbours to check
for nei in curr_square.neighbours:
temp_g_score: int = g_score[curr_square] + 1
if temp_g_score < g_score[nei]:
came_from[nei] = curr_square
g_score[nei] = temp_g_score
if nei not in open_set_hash:
queue_pos += 1
open_set.put((g_score[nei], queue_pos, nei))
open_set_hash.add(nei)
if nei != end and nei.color != CLOSED_COLOR and nei != ignore_node:
nei.set_open()
# Only visualize if called. Checks if square is closed to not repeat when mid node included.
i += 1
if visualize and not curr_square.is_closed():
if i % gph.speed_multiplier == 0:
i = 0
draw(gph, txt, display_update=False)
draw_vis_text(txt, is_dijkstra=True)
# Sets square to closed after finished checking
if curr_square != start and curr_square != ignore_node:
curr_square.set_closed()
return False
|
cbc69734278e7ab4b0c609a1bfab5a9280bedee4
| 18,189 |
import sys
import os
import importlib
import numpy as np
import pandas as pd
def calc_window_features(dict_features, signal_window, fs, **kwargs):
"""This function computes features matrix for one window.
Parameters
----------
dict_features : dict
Dictionary with features
signal_window: pandas DataFrame
Input from which features are computed, window
fs : int
Sampling frequency
\**kwargs:
See below:
* *features_path* (``string``) --
Directory of script with personal features
* *header_names* (``list or array``) --
Names of each column window
Returns
-------
pandas DataFrame
(columns) names of the features
(data) values of each features for signal
"""
features_path = kwargs.get('features_path', None)
names = kwargs.get('header_names', None)
# Execute imports
exec("import tsfel")
domain = dict_features.keys()
if features_path:
sys.path.append(features_path[:-len(features_path.split(os.sep)[-1])-1])
exec("import "+features_path.split(os.sep)[-1][:-3])
importlib.reload(sys.modules[features_path.split(os.sep)[-1][:-3]])
exec("from " + features_path.split(os.sep)[-1][:-3]+" import *")
# Create global arrays
func_total = []
func_names = []
imports_total = []
parameters_total = []
feature_results = []
feature_names = []
for _type in domain:
domain_feats = dict_features[_type].keys()
for feat in domain_feats:
# Only returns used functions
if dict_features[_type][feat]['use'] == 'yes':
# Read Function Name (generic name)
func_names = [feat]
# Read Function (real name of function)
func_total = [dict_features[_type][feat]['function']]
# Check for parameters
if dict_features[_type][feat]['parameters'] != '':
param = dict_features[_type][feat]['parameters']
# Check assert fs parameter:
if 'fs' in param:
# Select which fs to use
if fs is None:
# Check if features dict has default sampling frequency value
if type(param['fs']) is int or type(param['fs']) is float:
parameters_total = [str(key) + '=' + str(value) for key, value in param.items()]
else:
raise Exception('No sampling frequency assigned.')
else:
parameters_total = [str(key) + '=' + str(value) for key, value in param.items()
if key not in 'fs']
parameters_total += ['fs =' + str(fs)]
# feature has no fs parameter
else:
parameters_total = []
for key, value in param.items():
if type(value) is str:
value = '"'+value+'"'
parameters_total.append([str(key) + '=' + str(value)])
else:
parameters_total = ''
# To handle object type signals
signal_window = np.array(signal_window).astype(float)
# Name of each column to be concatenate with feature name
if not isinstance(signal_window, pd.DataFrame):
signal_window = pd.DataFrame(data=signal_window)
if names is not None:
if len(names) != len(list(signal_window.columns.values)):
raise Exception('header_names dimension does not match input columns.')
else:
header_names = names
else:
header_names = signal_window.columns.values
for ax in range(len(header_names)):
window = signal_window.iloc[:, ax]
execf = func_total[0] + '(window'
if parameters_total != '':
execf += ', ' + str(parameters_total).translate(str.maketrans({'[': '', ']': '', "'": ''}))
execf += ')'
eval_result = eval(execf, locals())
# Function returns more than one element
if type(eval_result) == tuple:
if np.isnan(eval_result[0]):
eval_result = np.zeros(len(eval_result))
for rr in range(len(eval_result)):
feature_results += [eval_result[rr]]
feature_names += [str(header_names[ax]) + '_' + func_names[0] + '_' + str(rr)]
else:
feature_results += [eval_result]
feature_names += [str(header_names[ax]) + '_' + func_names[0]]
features = pd.DataFrame(data=np.array(feature_results).reshape(1, len(feature_results)),
columns=np.array(feature_names))
return features
|
47edcbaec41bb3b5fb2d0c3713d3b5c23b0f3521
| 18,190 |
def nowIso8601():
"""
Returns time now in ISO 8601 format
use now(timezone.utc)
YYYY-MM-DDTHH:MM:SS.ffffff+HH:MM[:SS[.ffffff]]
.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
'2020-08-22T17:50:09.988921+00:00'
Assumes TZ aware
    For nanosecond resolution use attotime or datetime64 in pandas or numpy instead
"""
return (nowUTC().isoformat(timespec='microseconds'))
|
c5290e5a60f708f19d1cecf74c9cd927b4750ca3
| 18,191 |
import pandas as pd
def get_trip_data(tripdata_path, output_path, start=None, stop=None):
"""
Read raw tripdata csv and filter unnecessary info.
1 - Check if output path exists
2 - If output path does not exist
2.1 - Select columns ("pickup_datetime",
"passenger_count",
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude")
2.2 - If start and stop are not None, get excerpt
3 - Save clean tripdata in a csv
    4 - Return dataframe
Arguments:
tripdata_path {string} -- Raw trip data csv path
output_path {string} -- Cleaned trip data csv path
start {string} -- Datetime where tripdata should start (e.g., 2011-02-01 12:23:00)
stop {string} -- Datetime where tripdata should end (e.g., 2011-02-01 14:00:00)
Returns:
Dataframe -- Cleaned tripdata dataframe
"""
print("files:", output_path, tripdata_path)
# Trip data dataframe (Valentine's day)
tripdata_dt_excerpt = None
try:
# Load tripdata
tripdata_dt_excerpt = pd.read_csv(
output_path, parse_dates=True, index_col="pickup_datetime")
print("Loading file '{}'.".format(output_path))
except:
# Columns used
filtered_columns = ["pickup_datetime",
"passenger_count",
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude"]
# Reading file
tripdata_dt = pd.read_csv(tripdata_path,
parse_dates=True,
index_col="pickup_datetime",
usecols=filtered_columns,
na_values='0')
tripdata_dt_excerpt = None
# Get excerpt
if start and stop:
tripdata_dt_excerpt = pd.DataFrame(
tripdata_dt.loc[(tripdata_dt.index >= start) & (tripdata_dt.index <= stop)])
else:
tripdata_dt_excerpt = pd.DataFrame(tripdata_dt)
# Remove None values
tripdata_dt_excerpt.dropna(inplace=True)
# Sort
tripdata_dt_excerpt.sort_index(inplace=True)
# Save day data
tripdata_dt_excerpt.to_csv(output_path)
return tripdata_dt_excerpt
|
3aca0b89d1e747ae1ea3e5ea9f3fa0d63a5b9447
| 18,192 |
import urllib.parse
def _qparams2url(qparams):
"""
parse qparams to make url segment
:param qparams:
:return: parsed url segment
"""
try:
if qparams == []:
return ""
assert len(qparams) == 4
num = len(qparams[0][1])
path=""
for i in range(num):
for j in range(4):
path += str(qparams[j][0]) + '=' + str(qparams[j][1][i]) + "&"
path = path[:-1]
return path
except:
return urllib.parse.urlencode(qparams, doseq=True)
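# Usage sketch showing the expected input shape: four (key, value-list) pairs with
# value lists of equal length (keys and values below are made up).
example_qparams = [("chrom", ["1", "2"]),
                   ("start", ["100", "200"]),
                   ("end", ["150", "250"]),
                   ("ref", ["GRCh38", "GRCh38"])]
print(_qparams2url(example_qparams))
# chrom=1&start=100&end=150&ref=GRCh38&chrom=2&start=200&end=250&ref=GRCh38
print(repr(_qparams2url([])))  # ''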
|
ac416dd0dac87210fef5aa1bea97a60c84df60cf
| 18,193 |
import itertools
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def confusion_matrix(y_pred: IntTensor,
y_true: IntTensor,
normalize: bool = True,
labels: IntTensor = None,
title: str = 'Confusion matrix',
cmap: str = 'Blues',
show: bool = True):
"""Plot confusion matrix
Args:
y_pred: Model prediction returned by `model.match()`
y_true: Expected class_id.
normalize: Normalizes matrix values between 0 and 1.
Defaults to True.
labels: List of class string label to display instead of the class
numerical ids. Defaults to None.
title: Title of the confusion matrix. Defaults to 'Confusion matrix'.
cmap: Color schema as CMAP. Defaults to 'Blues'.
show: If the plot is going to be shown or not. Defaults to True.
"""
with tf.device("/cpu:0"):
# Ensure we are working with integer tensors.
y_pred = tf.cast(tf.convert_to_tensor(y_pred), dtype='int32')
y_true = tf.cast(tf.convert_to_tensor(y_true), dtype='int32')
cm = tf.math.confusion_matrix(y_true, y_pred)
cm = tf.cast(cm, dtype='float')
accuracy = tf.linalg.trace(cm) / tf.math.reduce_sum(cm)
misclass = 1 - accuracy
if normalize:
cm = tf.math.divide_no_nan(
cm,
tf.math.reduce_sum(cm, axis=1)[:, np.newaxis]
)
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if labels is not None:
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
cm_max = tf.math.reduce_max(cm)
thresh = cm_max / 1.5 if normalize else cm_max / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
val = cm[i, j]
color = "white" if val > thresh else "black"
txt = "%.2f" % val if val > 0.0 else "0"
plt.text(j, i, txt, horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(
accuracy, misclass))
if show:
plt.show()
else:
return plt
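# Minimal sketch with synthetic predictions for three classes; show=False returns the
# matplotlib module so the caller can save or display the figure.
y_true_demo = tf.constant([0, 0, 1, 1, 2, 2])
y_pred_demo = tf.constant([0, 1, 1, 1, 2, 0])
confusion_matrix(y_pred_demo, y_true_demo, labels=["cat", "dog", "bird"], show=False)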
|
744642cde03696f6ecbccc6f702e3f9a3cb67451
| 18,194 |
def from_smiles(smiles: str) -> Molecule:
"""Load a molecule from SMILES."""
return cdk.fromSMILES(smiles)
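# Usage is a one-liner; the `cdk` bridge object is assumed to be initialised by the
# surrounding module (e.g. via a JVM gateway), so this call is illustrative only.
# ethanol = from_smiles("CCO")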
|
a5315eeb9ffadff16b90db32ca07714fe1573cda
| 18,195 |
from typing import Dict
from typing import Any
def parse_template_config(template_config_data: Dict[str, Any]) -> EmailTemplateConfig:
"""
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
"""
try:
subject_template_name = template_config_data['subject']
except KeyError:
raise ImproperlyConfigured(_("No 'subject' key found")) from None
body_template_name = template_config_data.get('body')
text_body_template_name = template_config_data.get('text_body')
html_body_template_name = template_config_data.get('html_body')
is_html_body = template_config_data.get('is_html')
convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
if html_body_template_name and text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=identity,
)
elif html_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=html_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=convert_html_to_text,
)
elif text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
elif body_template_name:
if is_html_body:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=body_template_name,
text_body_processor=convert_html_to_text,
)
else:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
else:
raise ImproperlyConfigured(
'Could not parse template config data: {template_config_data}'.format( # noqa: E501
template_config_data=template_config_data))
_validate_template_name_existence(config.subject_template_name)
_validate_template_name_existence(config.text_body_template_name)
if config.html_body_template_name:
_validate_template_name_existence(config.html_body_template_name)
assert callable(config.text_body_processor)
return config
|
adea58fd8e8a16ec4fd48ef68aa2ff1c6356bd0d
| 18,196 |
import json
def stringify_message(message):
"""Return a JSON message that is alphabetically sorted by the key name
Args:
message
"""
return json.dumps(message, sort_keys=True, separators=(',', ':'))
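# Example: keys (including nested ones) are sorted and whitespace is stripped.
print(stringify_message({"b": 2, "a": 1, "nested": {"y": True, "x": None}}))
# {"a":1,"b":2,"nested":{"x":null,"y":true}}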
|
ccd51481627449345ba70fbf45d8069deca0f064
| 18,197 |
import numpy as np
def compute_similarity_transform(X, Y, compute_optimal_scale=False):
"""
A port of MATLAB's `procrustes` function to Numpy.
Adapted from http://stackoverflow.com/a/18927641/1884420
Args
X: array NxM of targets, with N number of points and M point dimensionality
Y: array NxM of inputs
compute_optimal_scale: whether we compute optimal scale or force it to be 1
Returns:
d: squared error after transformation
Z: transformed Y
T: computed rotation
b: scaling
c: translation
"""
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 = X0 / normX
Y0 = Y0 / normY
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
# Make sure we have a rotation
detT = np.linalg.det(T)
V[:,-1] *= np.sign( detT )
s[-1] *= np.sign( detT )
T = np.dot(V, U.T)
traceTA = s.sum()
if compute_optimal_scale: # Compute optimum scaling of Y.
b = traceTA * normX / normY
d = 1 - traceTA**2
Z = normX*traceTA*np.dot(Y0, T) + muX
else: # If no scaling allowed
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
c = muX - b*np.dot(muY, T)
return d, Z, T, b, c
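# Self-check sketch: rotate and translate a synthetic point cloud, then recover it.
rng = np.random.default_rng(1)
Y_demo = rng.normal(size=(10, 2))
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
X_demo = np.dot(Y_demo, R) + np.array([1.0, -2.0])
d, Z, T, b, c = compute_similarity_transform(X_demo, Y_demo)
print(round(float(d), 8), np.allclose(Z, X_demo))  # ~0.0 True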
|
10da3df241ec140de86b2307f9fc097b4f926407
| 18,198 |
def simplefenestration(idf, fsd, deletebsd=True, setto000=False):
"""convert a bsd (fenestrationsurface:detailed) into a simple
fenestrations"""
funcs = (window,
door,
glazeddoor,)
for func in funcs:
fenestration = func(idf, fsd, deletebsd=deletebsd, setto000=setto000)
if fenestration:
return fenestration
return None
|
b72e73a22756e80981d308b54037510354a5d327
| 18,199 |