content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---
import numpy as np
def heg_kfermi(rs):
""" magnitude of the fermi k vector for the homogeneous electron gas (HEG)
Args:
rs (float): Wigner-Seitz radius
Return:
float: kf
"""
density = (4*np.pi*rs**3/3)**(-1)
kf = (3*np.pi**2*density)**(1./3)
return kf | 4f210939ee7ec3c591c33ae7ec1b688ce2a257c6 | 23,660 |
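A quick sanity check for `heg_kfermi` (illustrative only, not part of the original snippet): the expression reduces analytically to kf = (9π/4)^(1/3)/rs ≈ 1.9192/rs, so the value for rs = 1.0 can be verified directly.

```python
# Illustrative check for heg_kfermi; assumes numpy is imported as np.
import numpy as np

kf = heg_kfermi(1.0)
# Analytically, kf = (9*pi/4)**(1/3) / rs, about 1.9192 for rs = 1.0.
assert np.isclose(kf, (9 * np.pi / 4) ** (1. / 3)), kf
```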
import requests
import json
import pandas as pd
def stock_em_jgdy_detail():
"""
    Eastmoney - Data Center - Featured Data - Institutional Research - Institutional Research Details
    http://data.eastmoney.com/jgdy/xx.html
    :return: institutional research details
:rtype: pandas.DataFrame
"""
url = "http://datainterface3.eastmoney.com/EM_DataCenter_V3/api/JGDYMX/GetJGDYMX"
params = {
"js": "datatable8174128",
"tkn": "eastmoney",
"secuCode": "",
"dateTime": "",
"sortfield": "0",
"sortdirec": "1",
"pageNum": "1",
"pageSize": "5000",
"cfg": "jgdymx",
"_": "1605088363693",
}
r = requests.get(url, params=params)
data_json = json.loads(r.text[r.text.find("(")+1:-1])
temp_df = pd.DataFrame([item.split("|") for item in data_json["Data"][0]["Data"]])
temp_df.columns = data_json["Data"][0]["FieldName"].split(",") + ["_"]
temp_df = temp_df.iloc[:, :-1]
return temp_df | 5d161ef69a77243202e48d80743c6664d8487549 | 23,661 |
import rasterio
import geopandas as gpd
from shapely.geometry import box
# create_grid, fill_cells and constants are module-local helpers (not shown here)
def intersect_with_grid(int_coords, fill=False):
"""
Args:
- int_coords: projected coordinates to be used for intersection
- fill: whether to include the interior of the intersected cells. I.e.
if the coords of a box are provided and intersect with 0,0 and 4,4,
this would include the entire 25-cell grid
Returns:
GeoDataFrame with three columns:
- x: x coordinate of NDFD grid. A higher x seems to move down, towards the south?
- y: y coordinate of NDFD grid. A higher y seems to move right, towards the east?
- geometry: geometry of grid cell (reprojected back into WGS84)
"""
grid_path = create_grid()
with rasterio.Env(), rasterio.open(grid_path) as src:
intersected_cells = set()
for int_coord in int_coords:
intersected_cells.add(src.index(*int_coord))
if fill:
intersected_cells = fill_cells(intersected_cells)
# For each of the cells, generate its box
cell_boxes = []
for x, y in list(intersected_cells):
cell_boxes.append([
x, y, box(*src.xy(x, y, 'll'), *src.xy(x, y, 'ur'))])
grid = gpd.GeoDataFrame(
cell_boxes, columns=['x', 'y', 'geometry'], crs=constants.crs)
return grid.to_crs(epsg=4326) | 460faccf0280749f96b34e676a936cf8a39d4b61 | 23,662 |
def safe_epsilon_softmax(epsilon, temperature):
"""Tolerantly handles the temperature=0 case."""
egreedy = epsilon_greedy(epsilon)
unsafe = epsilon_softmax(epsilon, temperature)
def sample_fn(key: Array, logits: Array):
return jax.lax.cond(temperature > 0,
(key, logits), lambda tup: unsafe.sample(*tup),
(key, logits), lambda tup: egreedy.sample(*tup))
def probs_fn(logits: Array):
return jax.lax.cond(temperature > 0,
logits, unsafe.probs,
logits, egreedy.probs)
def log_prob_fn(sample: Array, logits: Array):
return jax.lax.cond(temperature > 0,
(sample, logits), lambda tup: unsafe.logprob(*tup),
(sample, logits), lambda tup: egreedy.logprob(*tup))
def entropy_fn(logits: Array):
return jax.lax.cond(temperature > 0,
logits, unsafe.entropy,
logits, egreedy.entropy)
def kl_fn(p_logits: Array, q_logits: Array):
return categorical_kl_divergence(p_logits, q_logits, temperature)
return DiscreteDistribution(sample_fn, probs_fn, log_prob_fn, entropy_fn,
kl_fn) | cf9d09dcd82638c526fb9508161181af6452dad5 | 23,663 |
def get_object_from_controller(object_type, object_name, controller_ip, username, password, tenant):
"""
    Get the named object from the controller, or raise an
    exception if the object cannot be retrieved
    :param object_type: type of the object to get
    :param object_name: name of the object to get
    :param controller_ip: ip of controller
    :param username: username of controller
    :param password: password of controller
    :param tenant: tenant of controller
    :return: response status_code and content
"""
# Create new session
session = ApiSession.get_session(controller_ip, username,
password=password, tenant=tenant)
try:
resp = session.get_object_by_name(object_type, object_name)
return resp
    except:
        raise Exception("Failed to get %s" % object_name)
from typing import DefaultDict
import numpy as np
def scale_reshaping(scale: np.ndarray,
op2d: common.BaseNode,
kernel_channel_mapping: DefaultDict,
in_channels: bool = True) -> np.ndarray:
"""
    Before scaling a kernel, the scale factor needs to be reshaped to the correct
    dimensions. The target shape depends on the layer being scaled and on whether its
    input channels or output channels should be scaled.
    The index of the relevant kernel axis is obtained from kernel_channel_mapping.
Args:
scale: Scale factor to scale the kernel channels by.
op2d: Node to scale its kernel.
kernel_channel_mapping: Mapping from a layer to a tuple of indices of its output/input kernel channels.
in_channels: Kernel's index of input channels.
Returns:
The scale factor after reshaping it to the correct shape.
"""
op_ndims = op2d.get_weights_by_keys(KERNEL).ndim
    reshape_target = np.ones(op_ndims, dtype=int)  # np.int was removed in recent NumPy
reshape_target[kernel_channel_mapping.get(op2d.type)[int(in_channels)]] = -1
return np.reshape(scale, reshape_target) | edaa0ecbfc172f0a8a32a7bcc70629f1b51b3f57 | 23,665 |
def add_new_exif(info):
"""
    Create an EXIF record (detail table)
:param info:
:return:
"""
return ExifInfo(make=info.get('Image Make'),
model=info.get('Image Model'),
orientation=info.get('Image Orientation'),
date_original=info.get('EXIF DateTimeOriginal'),
x_resolution=info.get('Image XResolution'),
y_resolution=info.get('Image YResolution'),
resolution_unit=info.get('Image ResolutionUnit'),
artist=info.get('Image Artist'),
copyright=info.get('Image Copyright'),
software=info.get('Image Software'),
img_length=info.get('EXIF ExifImageLength'),
img_width=info.get('EXIF ExifImageWidth'),
exposure_time=info.get('EXIF ExposureTime'),
exposure_program=info.get('EXIF ExposureProgram'),
exposure_bias=info.get('EXIF ExposureBiasValue'),
exposure_mode=info.get('EXIF ExposureMode'),
fnumber=info.get('EXIF FNumber'),
sensitivity=info.get('EXIF ISOSpeedRatings'),
metering_mode=info.get('EXIF MeteringMode'),
flash=info.get('EXIF Flash'),
focal_len=info.get('EXIF FocalLength'),
white_balance=info.get('EXIF WhiteBalance'),
gps_latitude_ref=info.get('GPS GPSLatitudeRef'),
gps_latitude=info.get('GPS GPSLatitude'),
gps_longitude_ref=info.get('GPS GPSLongitudeRef'),
gps_longitude=info.get('GPS GPSLongitude'),
gps_altitude=info.get('GPS GPSAltitude'),
gps_datetime=info.get('GPS GPSDatetime'),
gps_direction=info.get(''),
gps_pos_err=info.get('')) | 55122efc1ef612b769be30a1e0735e237e12ab29 | 23,667 |
def prefetch_input_data(reader,
file_pattern,
is_training,
batch_size,
values_per_shard,
input_queue_capacity_factor=16,
num_reader_threads=1,
shard_queue_name="filename_queue",
value_queue_name="input_queue"):
"""Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
  where input_queue_capacity_factor should be chosen to trade off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values.
"""
data_files = []
for pattern in file_pattern.split(","):
data_files.extend(tf.gfile.Glob(pattern))
if not data_files:
tf.logging.fatal("Found no input files matching %s", file_pattern)
else:
tf.logging.info("Prefetching values from %d files matching %s",
len(data_files), file_pattern)
if is_training:
filename_queue = tf.train.string_input_producer(
data_files, shuffle=True, capacity=16, name=shard_queue_name)
min_queue_examples = values_per_shard * input_queue_capacity_factor
capacity = min_queue_examples + 100 * batch_size
values_queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string],
name="random_" + value_queue_name)
else:
"""
num_epochs: If specified, string_input_producer produces each string
from string_tensor num_epochs times before generating an OutOfRange error.
If not specified, string_input_producer can cycle through the strings in
string_tensor an unlimited number of times.
"""
filename_queue = tf.train.string_input_producer(
data_files, num_epochs=None, shuffle=False, capacity=1, name=shard_queue_name)
capacity = values_per_shard + 3 * batch_size
values_queue = tf.FIFOQueue(
capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
enqueue_ops = []
for _ in range(num_reader_threads):
_, value = reader.read(filename_queue)
enqueue_ops.append(values_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
values_queue, enqueue_ops))
tf.summary.scalar(
"queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
return values_queue | b754c1163cb868214e9ab74e1ae127a794a04808 | 23,668 |
def Chat_(request):
"""
{
"value" : "Your query"
}
"""
print(request.data)
serializer = PatternSerializer(request.data)
try:
response = ChatBot(serializer.data["value"])
except:
        response = {
            "error": "Data is in wrong format, use { 'value' : 'Your query' }",
            "response": None,
            "tag": None
        }
return Response(response) | 33cada0ccbbea0e65d01179d51e5f1ed28f498bd | 23,669 |
def get_solubility(molecular_weight, density):
"""
Estimate the solubility of each oil pseudo-component
Estimate the solubility (mol/L) of each oil pseudo-component using the
method from Huibers and Lehr given in the huibers_lehr.py module of
py_gnome in the directory gnome/utilities/weathering/. This method is from
Huibers & Katrisky in a 2012 EPA report and was further modified by Lehr
to better match measured values. The equation used here is adapted to
return results in mol/L.
Parameters
----------
molecular_weight : np.array
Molecular weights of each pseudo-component as recorded in the NOAA
Oil Library (g/mol)
density : np.array
Density of each pseudo-component as recorded in the NOAA Oil Library
(kg/m^3)
Returns
-------
solubility : np.array
Array of solubilities (mol/L) for each pseudo-component of the oil.
"""
return 46.4 * 10. ** (-36.7 * molecular_weight / density) | 64a951e8a6d9579cf934893fe5c9bc0a9181d4cc | 23,670 |
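A minimal, hypothetical usage sketch for `get_solubility`; the molecular weights and densities below are made-up placeholders, not values from the NOAA Oil Library.

```python
# Hypothetical usage sketch for get_solubility; input values are illustrative only.
import numpy as np

molecular_weight = np.array([100.0, 250.0, 500.0])      # g/mol (placeholder values)
density = np.array([750.0, 850.0, 950.0])               # kg/m^3 (placeholder values)
solubility = get_solubility(molecular_weight, density)  # mol/L, one entry per pseudo-component
print(solubility)
```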
import numpy as np
def build_1d_frp_matrix(func, x, sigma, B=1):
    """ Builds a square FRP matrix respecting periodic boundary conditions (pbc).
    func: Kernel function
    x: positions of points
    sigma: width of the kernel at each point
    B: period (box length) used for the periodic image shifts
    """
N = len(x)
A = np.zeros((N, N))
shifts = np.arange(-5, 6) * B
for r in range(N):
for p in range(N):
value = 0
for shift in shifts:
value += func(x[r] - x[p] + shift, sigma[r])
A[r, p] = value
return A | cc2d2d51935847cc01aacb2afe5c42ad19c91fe8 | 23,671 |
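A hypothetical example of calling `build_1d_frp_matrix` with a Gaussian kernel; the kernel function is an assumption, since any callable of the form `func(dx, sigma)` would work.

```python
# Hypothetical usage sketch for build_1d_frp_matrix with a Gaussian kernel.
import numpy as np

def gaussian(dx, sigma):
    return np.exp(-0.5 * (dx / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

x = np.linspace(0.0, 1.0, 8, endpoint=False)  # points in a periodic box of length B=1
sigma = np.full_like(x, 0.1)                  # per-point kernel widths
A = build_1d_frp_matrix(gaussian, x, sigma, B=1)
print(A.shape)  # (8, 8)
```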
def invalid_item(item_key, valid_flag=False):
"""
Update item valid_flag.
"""
if kind.str_is_empty(item_key):
raise RequiredError("item_key")
query = Registry.all()
query.filter("item_key =", item_key)
query.set("valid_flag", valid_flag)
return query.update(context.get_user_id()) | a99408dd770be0f8eb2e3c899b8d51160359b4fa | 23,672 |
def ret_str() -> str:
"""
# blahs
blahs
# blahs
Returns
-------
"""
# blahs
# blahs
# blahs
return '' | 56c182f971ff38444f5cc04fa1ea537ebbc3cb5f | 23,673 |
from typing import Union
def get_wh_words(document: Union[Doc, Span]):
"""
Get the list of WH-words\n
- when, where, why\n
- whence, whereby, wherein, whereupon\n
- how\n
- what, which, whose\n
- who, whose, which, what\n
Resources:\n
- https://grammar.collinsdictionary.com/easy-learning/wh-words\n
- https://www.ling.upenn.edu/hist-corpora/annotation/pos-wh.htm
:param document: The parsed document
:return: The list of WH-words
"""
    return [token for token in document if token.tag_ in ['WDT', 'WP', 'WP$', 'WRB']]
def sample_mixture_gaussian(batch_size, p_array, mu_list, sig_list, k=K, d=DIM):
"""
samples from a mixture of normals
:param batch_size: sample size
:param p_array: np array which includes probability for each component of mix
:param mu_list: list of means of each component
:param sig_list: list of covariance matrices of each component
:return: samples from mixture
"""
if hasattr(mu_list[0], "__len__"):
d = len(mu_list[0]) # dimension of distribution
else:
d = 1
k = len(mu_list) # number of mixtures
dataset = np.zeros([batch_size, d])
rh = np.random.choice(range(k), p=p_array, size=batch_size)
for i in range(batch_size):
if d > 1:
dataset[i, :] = np.random.multivariate_normal(mean=mu_list[rh[i]], cov=sig_list[rh[i]])
else:
dataset[i, :] = np.random.randn() * sig_list[rh[i]] + mu_list[rh[i]]
return dataset | 80374ed474ccb284a0cdb5efb63e44652318f0a2 | 23,675 |
def sign(x: float) -> float:
"""Return the sign of the argument. Zero returns zero."""
if x > 0:
return 1.0
elif x < 0:
return -1.0
else:
return 0.0 | 5998061fcb57ef0133c6ccd56e1ad79a31b06732 | 23,676 |
import numpy
def CalculateLocalDipoleIndex(mol):
"""
Calculation of local dipole index (D)
"""
GMCharge.ComputeGasteigerCharges(mol, iter_step)
res = []
for atom in mol.GetAtoms():
res.append(float(atom.GetProp('_GasteigerCharge')))
cc = [numpy.absolute(res[x.GetBeginAtom().GetIdx()] - res[x.GetEndAtom().GetIdx()]) for x in mol.GetBonds()]
B = len(mol.GetBonds())
return round(sum(cc) / B, 3) | f4e1f0cd0130cc1e94430eac2df910946f4e98d0 | 23,677 |
def tile1(icon="", **kw):
"""<!-- Tile with icon, icon can be font icon or image -->"""
ctx=[kw['tile_label']]
s = span(cls="icon %s" % icon)
ctx.append(s)
d2 = div(ctx=ctx, cls="tile-content iconic")
return d2 | fdcdecbc81733ae6b615cf5db5bce60585337efe | 23,678 |
from typing import Optional
def config_server(sender_email:str, sender_autorization_code:str, smtp_host: Optional[str] = None, smtp_port: Optional[int] = None, timeout=10):
"""
smtp server configuration
:param sender_email: sender's email
:param sender_autorization_code: sender's smtp authorization code
:param smtp_host: smtp host address
:param smtp_port: smtp host port
:param timeout: timeout
:return: smtp server object
"""
assert isinstance(sender_email, str), "sender_email should be given a string"
assert isinstance(sender_autorization_code, str), "sender_authorization_code should be given a string"
s = server(sender_email, sender_autorization_code, smtp_host=smtp_host, smtp_port=smtp_port, timeout=timeout)
if s.smtp_able():
print("server config success")
return s
else:
raise SMTPConfigException | f93b9efff8e8f415242bb9dbb5e09529baa1e238 | 23,679 |
def try_to_import_file(file_name):
"""
    Tries to import the file as a Python module. First calls
    import_file_as_package() and falls back to import_file_as_module(). If
    both fail, it keeps silent on any errors and returns the occurred exceptions.
    :param file_name: The path to import.
    :return: The loaded module, or a tuple of length 2 with the exceptions.
"""
try:
return import_file_as_package(file_name)
except Exception as e1:
try:
return import_file_as_module(file_name)
except Exception as e2:
return e1, e2 | 15ab5c695bb7801b894c4466994abbb9f4ad791a | 23,680 |
def is_uppervowel(char: str) -> bool:
"""
Checks if the character is an uppercase Irish vowel (aeiouáéíóú).
:param char: the character to check
:return: true if the input is a single character, is uppercase, and is an Irish vowel
"""
vowels = "AEIOUÁÉÍÓÚ"
return len(char) == 1 and char[0] in vowels | 14e87fc53fbb31c2a1ba66d17082be533ef8c5a9 | 23,681 |
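A few illustrative checks for `is_uppervowel` (not part of the original snippet): only single uppercase Irish vowels pass.

```python
# Illustrative checks for is_uppervowel.
assert is_uppervowel("Á")       # uppercase Irish vowel
assert not is_uppervowel("a")   # lowercase vowels are rejected
assert not is_uppervowel("AE")  # more than one character is rejected
```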
from typing import Optional
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sbn
def visualize_permutation_results(
obs_r2: float,
permuted_r2: np.ndarray,
verbose: bool = True,
permutation_color: str = "#a6bddb",
output_path: Optional[str] = None,
show: bool = True,
close: bool = False,
) -> float:
"""
Parameters
----------
obs_r2 : float
Denotes the r2 value obtained using `x2_array` to predict `x1_array`,
given `z_array` if it was not None.
permuted_r2 : 1D np.ndarray
Should have length `num_permutations`. Each element denotes the r2
attained using a permuted version of `x2_array` to predict `x1_array`,
given `z_array` if it was not None.
verbose : optional, bool.
Denotes whether or not the p-value of the permutation test will be
printed to the stdout. Default == True.
permutation_color : optional, str.
        Denotes the color of the kernel density estimate used to visualize the
distribution of r2 from the permuted values of `x2_array`.
Default == '#a6bddb'.
output_path : optional, str or None.
Denotes the path to the location where the plot visualizing the
permutation test results will be stored. If `output_path` is None, the
plot will not be stored. Default is None.
show : optional, bool.
Denotes whether the matplotlib figure that visualizes the results of
the permutation test should be shown. Default == True.
close : optional, bool.
Denotes whether the matplotlib figure that visualizes the results of
the permutation test should be closed. Default == False.
Returns
-------
p_value : float.
The p-value of the visual permutation test, denoting the percentage of
times that the r2 with permuted `x2_array` was greater than the r2 with
the observed `x2_array`.
"""
fig, ax = plt.subplots(figsize=(10, 6))
p_value = (obs_r2 < permuted_r2).mean()
if verbose:
msg = "The p-value of the permutation independence test is {:.2f}."
print(msg.format(p_value))
sbn.kdeplot(permuted_r2, ax=ax, color=permutation_color, label="Simulated")
v_line_label = "Observed\np-val: {:0.3f}".format( # noqa: F522
p_value, precision=1
)
ax.vlines(
obs_r2,
ax.get_ylim()[0],
ax.get_ylim()[1],
linestyle="dashed",
color="black",
label=v_line_label,
)
ax.set_xlabel(r"$r^2$", fontsize=13)
ax.set_ylabel(
"Density", fontdict={"fontsize": 13, "rotation": 0}, labelpad=40
)
ax.legend(loc="best")
sbn.despine()
if output_path is not None:
fig.savefig(output_path, dpi=500, bbox_inches="tight")
if show:
plt.show()
if close:
plt.close(fig=fig)
return p_value | cfdf84fd78cd54b39eb6db9b0af799a230a294c8 | 23,682 |
def htmlmovie(html_index_fname,pngfile,framenos,figno):
#=====================================
"""
Input:
pngfile: a dictionary indexed by (frameno,figno) with value the
corresponding png file for this figure.
framenos: a list of frame numbers to include in movie
figno: integer with the figure number for this movie.
Returns:
text for an html file that incorporates javascript to loop through the
plots one after another.
New 6/7/10: The html page also has buttons for controlling the movie.
The parameter iterval below is the time interval between loading
successive images and is in milliseconds.
The img_width and img_height parameters do not seem to have any effect.
"""
text = """
<html>
<head>
<script language="Javascript">
<!---
var num_images = %s; """ % len(framenos)
text += """
var img_width = 800;
var img_height = 600;
var interval = 300;
var images = new Array();
function preload_images()
{
t = document.getElementById("progress");
"""
i = 0
for frameno in framenos:
i = i+1
text += """
t.innerHTML = "Preloading image ";
images[%s] = new Image(img_width, img_height);
images[%s].src = "%s";
""" % (i,i,pngfile[frameno,figno])
text += """
t.innerHTML = "";
}
function tick()
{
frame += 1;
if (frame > num_images+1)
frame = 1;
document.movie.src = images[frame].src;
tt = setTimeout("tick()", interval);
}
function startup()
{
preload_images();
frame = 1;
document.movie.src = images[frame].src;
}
function rewind()
{
frame = 1;
document.movie.src = images[frame].src;
}
function start()
{
tt = setTimeout("tick()", interval);
}
function pause()
{
clearTimeout(tt);
}
function restart()
{
tt = setTimeout("tick()", interval);
}
function slower()
{
interval = interval / 0.7;
}
function faster()
{
interval = interval * 0.7;
}
// --->
</script>
</head>
<body onLoad="startup();">
<form>
<input type="button" value="Start movie" onClick="start()">
<input type="button" value="Pause" onClick="pause()">
<input type="button" value="Rewind" onClick="rewind()">
<input type="button" value="Slower" onClick="slower()">
<input type="button" value="Faster" onClick="faster()">
<a href="%s">Plot Index</a>
</form>
<p><div ID="progress"></div></p>
<img src="%s" name="movie"/>
</body>
</html>
""" % (html_index_fname,pngfile[framenos[0],figno])
return text
# end of htmlmovie | 7be1cf8ffce35e51667a67f322fbf038f396e817 | 23,683 |
from pathlib import Path
import re
import io
import pandas as pd
def readin_q3d_matrix_m(path: str) -> pd.DataFrame:
"""Read in Q3D cap matrix from a .m file exported by Ansys Q3d.
Args:
path (str): Path to .m file
Returns:
pd.DataFrame of cap matrix, with no names of columns.
"""
text = Path(path).read_text()
match = re.findall(r'capMatrix (.*?)]', text, re.DOTALL)
if match:
match = match[0].strip('= [').strip(']').strip('\n')
dfC = pd.read_csv(io.StringIO(match),
skipinitialspace=True,
header=None)
return dfC | 35a79ff4697ba1df3b2c1754d8b28064b459201f | 23,684 |
def get_DB(type='mysql'):
    """
    Return the database adapter class for the given backend.
    Parameters
    ----------
    type : str
        Database backend, either 'mysql' or 'mongodb'.
    Returns
    -------
    The matching adapter class: MySQLAdapter or MongoAdapter.
    """
if type == 'mysql':
return MySQLAdapter
elif type == 'mongodb':
return MongoAdapter | 07a3f0c1fcac691855f616e2e96d5ab947ca7be3 | 23,685 |
import numpy as np
def movstd(x,window):
""" Computes the moving standard deviation for a 1D array. Returns
an array with the same length of the input array.
Small window length provides a finer description of deviation
Longer window coarser (faster to compute).
By default, each segment is centered, going L/2 to L/2-1 around Ai.
Parameters
----------
x : input numpy array (1D)
window : integer for the evaluation window,
Returns
-------
1d vector of standard deviations
"""
if not type(x)==np.ndarray:
x=np.array(x)
if window%2:
window=window-1
win2 = np.floor(window/2)
    N=len(x)
    medx = np.nanmedian(x)  # fill value for samples not covered by a full window (assumed; `medx` was undefined in the original)
    y=np.full(N,medx)
for ii in np.arange(win2,N-win2+1,window):
try:
            idx=(np.arange(-win2,win2)+ii).astype(int)  # np.int was removed in recent NumPy
y[idx] = np.nanstd(x[idx])
except:
pass
return y | e9c4bc43f92d6d22c8191d1d15b93a51aadef32c | 23,686 |
def get_attribute(parent, selector, attribute, index=0):
"""Get the attribute value for the child element of parent matching the given CSS selector
If index is specified, return the attribute value for the matching child element with the specified zero-based index; otherwise, return the attribute value for the first matching child element.
If selector is None, return the attribute value for parent instead.
"""
if selector is None:
return parent.get(attribute)
else:
values = get_attributes(parent, selector, attribute)
if (index < 0 and len(values) >= abs(index)) or (index >= 0 and len(values) > index):
return values[index] | fff9ec0a30dd00431164c69f5ba3430ec09f804a | 23,687 |
import torch
def cal_head_bbox(kps, image_size):
"""
Args:
kps (torch.Tensor): (N, 19, 2)
image_size (int):
Returns:
bbox (torch.Tensor): (N, 4)
"""
NECK_IDS = 12 # in cocoplus
kps = (kps + 1) / 2.0
necks = kps[:, NECK_IDS, 0]
zeros = torch.zeros_like(necks)
ones = torch.ones_like(necks)
# min_x = int(max(0.0, np.min(kps[HEAD_IDS:, 0]) - 0.1) * image_size)
min_x, _ = torch.min(kps[:, NECK_IDS:, 0] - 0.05, dim=1)
min_x = torch.max(min_x, zeros)
max_x, _ = torch.max(kps[:, NECK_IDS:, 0] + 0.05, dim=1)
max_x = torch.min(max_x, ones)
# min_x = int(max(0.0, np.min(kps[HEAD_IDS:, 0]) - 0.1) * image_size)
min_y, _ = torch.min(kps[:, NECK_IDS:, 1] - 0.05, dim=1)
min_y = torch.max(min_y, zeros)
max_y, _ = torch.max(kps[:, NECK_IDS:, 1], dim=1)
max_y = torch.min(max_y, ones)
min_x = (min_x * image_size).long() # (T,)
max_x = (max_x * image_size).long() # (T,)
min_y = (min_y * image_size).long() # (T,)
max_y = (max_y * image_size).long() # (T,)
rects = torch.stack((min_x, max_x, min_y, max_y), dim=1)
return rects | 546b4d4fcf756a75dd588c85ab467c21e9f45550 | 23,689 |
def my_json_render(docs, style="dep", options=None, manual=False) -> list:
"""
Render nlp visualisation.
Args:
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and
instead expect a dict/list of dicts.
Returns:
[{'text': '近一周饮食不当,一度腹泻,日3次,泻下后精神疲烦,时有低热,怕风,口干,痰中夹有血丝,左侧胸痛时作',
'ents': [{'start': 20, 'end': 24, 'label': 'ZZ'},
{'start': 25, 'end': 27, 'label': 'CD'},
{'start': 27, 'end': 29, 'label': 'ZZ'},
{'start': 30, 'end': 32, 'label': 'ZZ'},
{'start': 33, 'end': 35, 'label': 'ZZ'},
{'start': 36, 'end': 42, 'label': 'ZZ'}],
'title': None, 'settings': {'lang': 'zh', 'direction': 'ltr'}}]
"""
if options is None:
options = {}
factories = {
"dep": (DependencyRenderer, parse_deps),
"ent": (EntityRenderer, parse_ents),
}
if style not in factories:
raise ValueError(Errors.E087.format(style=style))
if isinstance(docs, (Doc, Span, dict)):
docs = [docs]
docs = [obj if not isinstance(obj, Span) else obj.as_doc() for obj in docs]
if not all(isinstance(obj, (Doc, Span, dict)) for obj in docs):
raise ValueError(Errors.E096)
renderer, converter = factories[style]
renderer = renderer(options=options)
parsed = [converter(doc, options) for doc in docs] if not manual else docs
return parsed | a19068ae0c9e4eb89e810f378ccc8d5fbd14547a | 23,690 |
import json
def get_testcase_chain(testcase_id, case_type, chain_list=None, with_intf_system_name=None, with_extract=None,
only_first=False, main_case_flow_id=None, childless=False):
"""
根据testcase_id获取调用链, 包含接口用例和全链路用例
return example:
[
{
"preCaseId": 1,
"preCaseName": "指定手机获取验证码",
"preCaseType": "接口用例",
"preIntfName": "接口描述-/url/api"
},
{
"preCaseId": 27,
"preCaseName": "新户申请钱包",
"preCaseType": "全链路用例"
},
{
"preCaseId": 2,
"preCaseName": "登录",
"preCaseType": "接口用例"
}
]
"""
if not chain_list:
chain_list = []
    # guard against overly long call chains
if len(chain_list) >= 100:
return chain_list
if case_type == 1:
tc_obj = ApiTestcaseInfoManager.get_testcase(id=testcase_id)
if tc_obj:
if with_intf_system_name:
intf_obj = ApiIntfInfoManager.get_intf(id=tc_obj.api_intf_id)
system_obj = ApiSystemInfoManager.get_system(id=intf_obj.api_system_id)
chain_row_dic = {
"preCaseName": '{0}__{1}'.format(tc_obj.testcase_name, tc_obj.expect_result),
"preCaseId": tc_obj.id,
"preCaseType": get_desc_by_case_type(case_type),
"preIntfName": '{0}-{1}'.format(intf_obj.intf_desc, intf_obj.intf_name),
"preSystemName": system_obj.system_name
}
if with_extract:
                    # parse the variables extracted by the test case
extract_v_names = get_extract_v_names(testcase_id)
public_v_names = get_public_v_names(tc_obj)
chain_row_dic.update({"extract_v_names": extract_v_names, "public_v_names": public_v_names})
chain_list.insert(0, chain_row_dic)
else:
chain_row_dic = {
"preCaseName": '{0}__{1}'.format(tc_obj.testcase_name, tc_obj.expect_result),
"preCaseId": tc_obj.id,
"preCaseType": get_desc_by_case_type(case_type),
}
if with_extract:
                    # parse the variables extracted by the test case
extract_v_names = get_extract_v_names(testcase_id)
public_v_names = get_public_v_names(tc_obj)
chain_row_dic.update({"extract_v_names": extract_v_names, "public_v_names": public_v_names})
chain_list.insert(0, chain_row_dic)
if childless:
chain_list[0]['hasChildren'] = False
return chain_list
setup_case_list = json.loads(tc_obj.setup_case_list) if tc_obj.setup_case_list else []
setup_case_list.reverse()
if setup_case_list:
if only_first:
chain_list[0]['hasChildren'] = True
return chain_list
else:
                    # continue recursively resolving setup (precondition) cases
for setup_case_str in setup_case_list:
setup_case_type, setup_case_id, option = parse_setup_case_str(setup_case_str)
kwargs = {
'chain_list': chain_list,
'with_intf_system_name': with_intf_system_name,
'with_extract': with_extract
}
if setup_case_type == 1:
if option == 'self':
kwargs['childless'] = True
elif setup_case_type == 2:
kwargs['main_case_flow_id'] = option
chain_list = get_testcase_chain(setup_case_id, setup_case_type, **kwargs)
# setup_case_type, setup_case_id, setup_case_flow_id = parse_setup_case_str(setup_case_str)
# chain_list = get_testcase_chain(
# setup_case_id, setup_case_type, chain_list=chain_list,
# with_intf_system_name=with_intf_system_name, with_extract=with_extract,
# main_case_flow_id=setup_case_flow_id
# )
else:
if only_first:
chain_list[0]['hasChildren'] = False
return chain_list
return chain_list
elif case_type == 2:
tm_obj = ApiTestcaseMainManager.get_testcase_main(id=testcase_id)
if tm_obj:
chain_list.insert(
0,
{
"preCaseName": '{0}__{1}'.format(tm_obj.testcase_name, tm_obj.expect_result),
"preCaseId": tm_obj.id,
"preCaseType": get_desc_by_case_type(case_type),
"preIntfName": '',
"preSystemName": '',
"customFlowId": None,
"customFlowName": ''
}
)
if only_first:
chain_list[0]['hasChildren'] = False
if main_case_flow_id:
flow_obj = ApiTestcaseMainCustomFlowManager.get_flow(id=main_case_flow_id)
if flow_obj:
chain_list[0]['customFlowName'] = flow_obj.flow_name
chain_list[0]['customFlowId'] = flow_obj.id
return chain_list | 92892c432a46287559c41fe9d1b5fb11dec35e86 | 23,691 |
from typing import Iterable
def approximate_parameter_profile(
problem: Problem,
result: Result,
profile_index: Iterable[int] = None,
profile_list: int = None,
result_index: int = 0,
n_steps: int = 100,
) -> Result:
"""
Calculate profiles based on an approximation via a normal likelihood
centered at the chosen optimal parameter value, with the covariance matrix
being the Hessian or FIM.
Parameters
----------
problem:
The problem to be solved.
result:
A result object to initialize profiling and to append the profiling
results to. For example, one might append more profiling runs to a
previous profile, in order to merge these.
The existence of an optimization result is obligatory.
profile_index:
List with the profile indices to be computed
(by default all of the free parameters).
profile_list:
Integer which specifies whether a call to the profiler should create
a new list of profiles (default) or should be added to a specific
profile list.
result_index:
Index from which optimization result profiling should be started
(default: global optimum, i.e., index = 0).
n_steps:
Number of profile steps in each dimension.
Returns
-------
result:
The profile results are filled into `result.profile_result`.
"""
# Handling defaults
# profiling indices
if profile_index is None:
profile_index = problem.x_free_indices
# create the profile result object (retrieve global optimum) or append to
# existing list of profiles
global_opt = initialize_profile(problem, result, result_index,
profile_index, profile_list)
# extract optimization result
optimizer_result = result.optimize_result.list[result_index]
# extract values of interest
x = optimizer_result.x
fval = optimizer_result.fval
hess = problem.get_reduced_matrix(optimizer_result.hess)
# ratio scaling factor
ratio_scaling = np.exp(global_opt - fval)
# we need the hessian - compute if not provided or fishy
if hess is None or np.isnan(hess).any():
logger.info("Computing Hessian/FIM as not available in result.")
hess = problem.objective(
problem.get_reduced_vector(x), sensi_orders=(2,))
# inverse of the hessian
sigma = np.linalg.inv(hess)
# the steps
xs = np.linspace(problem.lb_full, problem.ub_full, n_steps).T
# loop over parameters for profiling
for i_par in profile_index:
# not requested or fixed -> compute no profile
if i_par in problem.x_fixed_indices:
continue
i_free_par = problem.full_index_to_free_index(i_par)
ys = multivariate_normal.pdf(xs[i_par], mean=x[i_par],
cov=sigma[i_free_par, i_free_par])
fvals = - np.log(ys)
ratios = ys / ys.max() * ratio_scaling
profiler_result = ProfilerResult(
x_path=xs,
fval_path=fvals,
ratio_path=ratios
)
result.profile_result.set_profiler_result(
profiler_result=profiler_result,
i_par=i_par, profile_list=profile_list)
return result | 478a95b370360c18a808e1753a8ad60f6a7b1bb7 | 23,692 |
def _process_input(data, context):
""" pre-process request input before it is sent to
TensorFlow Serving REST API
Args:
data (obj): the request data, in format of dict or string
context (Context): object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
if context.request_content_type == 'application/json':
data = data.read().decode("utf-8")
return data if len(data) else ''
raise ValueError('{{"error": "unsupported content type {}"}}'.format(
context.request_content_type or "unknown"
)) | 05d48d327613df156a5a3b6ec76e6e5023fa54ca | 23,693 |
from ibmsecurity.appliance.ibmappliance import IBMError
def update_policies(isamAppliance, name, policies, action, check_mode=False, force=False):
"""
Update a specified policy set's policies (add/remove/set)
Note: Please input policies as an array of policy names (it will be converted to id's)
"""
pol_id, update_required, json_data = _check_policies(isamAppliance, name, policies, action)
if pol_id is None:
raise IBMError("999", "Cannot update data for unknown policy set: {0}".format(name))
if force is True or update_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put(
"Update a specified policy set",
"{0}/{1}/policies{2}".format(uri, pol_id, tools.create_query_string(action=action)), json_data)
return isamAppliance.create_return_object() | 666fd658f8d6748f8705a098b0f773f3fa758bbe | 23,694 |
def is_bullish_engulfing(previous: Candlestick, current: Candlestick) -> bool:
"""Engulfs previous candle body. Wick and tail not included"""
return (
previous.is_bearish
and current.is_bullish
and current.open <= previous.close
and current.close > previous.open
) | ab46a10009368cbb057ddf79ee9eda56ab862169 | 23,695 |
import math
import numpy as np
def yaw_cov_to_quaternion_cov(yaw, yaw_covariance):
"""Calculate the quaternion covariance based on the yaw and yaw covariance.
    Perform the operation :math:`C_q = R C_{\\theta} R^T`
    where :math:`C_{\\theta}` is the yaw covariance,
    :math:`C_q` is the quaternion covariance and :math:`R` is
    the Jacobian of the transform from yaw to a quaternion.
    :math:`R` is a column vector defined by:
    .. math::
        \\frac{dx}{d\\theta} &= 0, \\\\
        \\frac{dy}{d\\theta} &= 0, \\\\
        \\frac{dz}{d\\theta} &= \\frac{1}{2} \\cos \\frac{1}{2} \\theta, \\\\
        \\frac{dw}{d\\theta} &= -\\frac{1}{2} \\sin \\frac{1}{2} \\theta
    :param yaw: Yaw of the vehicle in radians
    :type yaw: float
    :param yaw_covariance: Variance of the yaw estimate
    :type yaw_covariance: float
    :return: The yaw covariance transformed to quaternion coordinates.
    :rtype: 4x4 numpy array
"""
R = np.c_[0,
0,
0.5 * math.cos(yaw * 0.5),
-0.5 * math.sin(yaw * 0.5)].T
quat_covariance = R.dot(yaw_covariance).dot(R.T)
return quat_covariance | f98a7b996ea290f735214704d592c5926ca4d07f | 23,696 |
import logging
async def token(req: web.Request) -> web.Response:
"""Auth endpoint."""
global nonce, user_eppn, user_family_name, user_given_name
id_token = {
"at_hash": "fSi3VUa5i2o2SgY5gPJZgg",
"sub": "smth",
"eduPersonAffiliation": "member;staff",
"eppn": user_eppn,
"displayName": f"{user_given_name} {user_family_name}",
"iss": "http://mockauth:8000",
"schacHomeOrganizationType": "urn:schac:homeOrganizationType:test:other",
"given_name": user_given_name,
"nonce": nonce,
"aud": "aud2",
"acr": "http://mockauth:8000/LoginHaka",
"nsAccountLock": "false",
"eduPersonScopedAffiliation": "[email protected];[email protected]",
"auth_time": 1606579533,
"name": f"{user_given_name} {user_family_name}",
"schacHomeOrganization": "test.what",
"exp": 9999999999,
"iat": 1561621913,
"family_name": user_family_name,
"email": user_eppn,
}
data = {"access_token": "test", "id_token": jwt.encode(header, id_token, jwk_pair[1]).decode("utf-8")}
logging.info(data)
return web.json_response(data) | 771d21043a1185a7a6b4bd34fda5ae78ad45d51e | 23,697 |
import numpy as np
def split_train_test(observations, train_percentage):
"""Splits observations into a train and test set.
Args:
observations: Observations to split in train and test. They can be the
representation or the observed factors of variation. The shape is
(num_dimensions, num_points) and the split is over the points.
train_percentage: Fraction of observations to be used for training.
Returns:
observations_train: Observations to be used for training.
observations_test: Observations to be used for testing.
"""
num_labelled_samples = observations.shape[1]
num_labelled_samples_train = int(
np.ceil(num_labelled_samples * train_percentage))
num_labelled_samples_test = num_labelled_samples - num_labelled_samples_train
observations_train = observations[:, :num_labelled_samples_train]
observations_test = observations[:, num_labelled_samples_train:]
assert observations_test.shape[1] == num_labelled_samples_test, \
"Wrong size of the test set."
return observations_train, observations_test | 8b6aa5896c5ae8fc72414e707013248fcb320d88 | 23,698 |
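A hypothetical usage sketch for `split_train_test`; the random array below is a placeholder with 10 dimensions and 100 points, split 80/20 along the points axis.

```python
# Hypothetical usage sketch for split_train_test.
import numpy as np

observations = np.random.rand(10, 100)   # 10 dimensions, 100 points (placeholder data)
train, test = split_train_test(observations, train_percentage=0.8)
print(train.shape, test.shape)           # (10, 80) (10, 20)
```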
def InitF11(frame):
    """F11 to set focus on the shell
    :param frame: see InitShorcuts->param
    :type frame: idem
    :return: entry (here a tuple) for AcceleratorTable
    :rtype: tuple(int, int, int)
    """
frame.Bind(wx.EVT_MENU, frame.shell.SetFocus, id=wx.ID_SHELL_FOCUS)
return (wx.ACCEL_NORMAL, wx.WXK_F11, wx.ID_SHELL_FOCUS) | 055852664e48154768353af109ec1be533a7ad4a | 23,699 |
def ReadExactly(from_stream, num_bytes):
"""Reads exactly num_bytes from a stream."""
pieces = []
bytes_read = 0
while bytes_read < num_bytes:
data = from_stream.read(min(MAX_READ, num_bytes - bytes_read))
bytes_read += len(data)
pieces.append(data)
return ''.join(pieces) | 5fcd6f204734779e81e7c4b9f263ad4534426278 | 23,700 |
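`ReadExactly` relies on a module-level `MAX_READ` constant that is not shown in the snippet; the sketch below assumes one. Since the pieces are joined with a str `''`, the helper expects text streams as written. Note that if the stream ends before `num_bytes` are available, the loop as written would spin forever, because `read()` keeps returning an empty string.

```python
# Hypothetical usage sketch for ReadExactly; MAX_READ is assumed to be a module constant.
import io

MAX_READ = 4096  # assumed chunk-size constant
stream = io.StringIO("hello world")
print(ReadExactly(stream, 5))  # "hello"
```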
import json
import phantom.rules as phantom
from hashlib import sha256
def indicator_collect(container=None, artifact_ids_include=None, indicator_types_include=None, indicator_types_exclude=None, indicator_tags_include=None, indicator_tags_exclude=None, **kwargs):
"""
Collect all indicators in a container and separate them by data type. Additional output data paths are created for each data type. Artifact scope is ignored.
Args:
container (CEF type: phantom container id): The current container
artifact_ids_include (CEF type: phantom artifact id): Optional parameter to only look for indicator values that occur in the artifacts with these IDs. Must be one of: json serializable list, comma separated integers, or a single integer.
indicator_types_include: Optional parameter to only include indicators with at least one of the provided types in the output. If left empty, all indicator types will be included except those that are explicitly excluded. Accepts a comma-separated list.
indicator_types_exclude: Optional parameter to exclude indicators with any of the provided types from the output. Accepts a comma-separated list.
indicator_tags_include: Optional parameter to only include indicators with at least one of the provided tags in the output. If left empty, tags will be ignored except when they are excluded. Accepts a comma-separated list.
indicator_tags_exclude: Optional parameter to exclude indicators with any of the provided tags from the output. Accepts a comma-separated list.
Returns a JSON-serializable object that implements the configured data paths:
all_indicators.*.cef_key
all_indicators.*.cef_value
all_indicators.*.data_types
all_indicators.*.artifact_id
domain.*.cef_key
domain.*.cef_value (CEF type: domain)
domain.*.artifact_id
file_name.*.cef_key (CEF type: file name)
file_name.*.cef_value (CEF type: file name)
file_name.*.artifact_id
"""
############################ Custom Code Goes Below This Line #################################
outputs = {'all_indicators': []}
def grouper(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def get_indicator_json(value_set):
value_list = list(value_set)
indicator_url = phantom.build_phantom_rest_url('indicator') + '?page_size=0&timerange=all'
hashed_list = [sha256(item.encode('utf-8')).hexdigest() for item in value_list]
indicator_dictionary = {}
for group in grouper(hashed_list, 100):
query_url = indicator_url + f'&_filter_value_hash__in={group}'
indicator_response = phantom.requests.get(query_url, verify=False)
indicator_json = indicator_response.json() if indicator_response.status_code == 200 else {}
for data in indicator_json.get('data', []):
indicator_dictionary[data['value_hash']] = data
return indicator_dictionary
def check_numeric_list(input_list):
return (all(isinstance(x, int) for x in input_list) or all(x.isnumeric() for x in input_list))
def is_valid_indicator(list_1=None, list_2=None, check_type="include"):
list_1 = [] if not list_1 else list_1
list_2 = [] if not list_2 else list_2
if check_type == 'exclude':
if list_1 and any(item in list_1 for item in list_2):
return False
elif check_type == 'include':
if list_1 and not any(item in list_1 for item in list_2):
return False
return True
# validate container and get ID
if isinstance(container, dict) and container['id']:
container_dict = container
container_id = container['id']
elif isinstance(container, int):
rest_container = phantom.requests.get(uri=phantom.build_phantom_rest_url('container', container), verify=False).json()
if 'id' not in rest_container:
            raise RuntimeError(f'Failed to find container with id {container}')
container_dict = rest_container
container_id = container
else:
raise TypeError("The input 'container' is neither a container dictionary nor a valid container id, so it cannot be used")
if indicator_types_include:
indicator_types_include = [item.strip(' ') for item in indicator_types_include.split(',')]
if indicator_types_exclude:
indicator_types_exclude = [item.strip(' ') for item in indicator_types_exclude.split(',')]
if indicator_tags_include:
indicator_tags_include = [item.strip(' ').replace(' ', '_') for item in indicator_tags_include.split(',')]
if indicator_tags_exclude:
indicator_tags_exclude = [item.strip(' ').replace(' ', '_') for item in indicator_tags_exclude.split(',')]
if artifact_ids_include:
# Try to convert to a valid list
if isinstance(artifact_ids_include, str) and artifact_ids_include.startswith('[') and artifact_ids_include.endswith(']'):
artifact_ids_include = json.loads(artifact_ids_include)
elif isinstance(artifact_ids_include, str):
artifact_ids_include = artifact_ids_include.replace(' ','').split(',')
elif isinstance(artifact_ids_include, int):
artifact_ids_include = [artifact_ids_include]
# Check validity of list
if isinstance(artifact_ids_include, list) and not check_numeric_list(artifact_ids_include):
raise ValueError(
f"Invalid artifact_ids_include entered: '{artifact_ids_include}'. Must be a list of integers."
)
artifact_ids_include = [int(art_id) for art_id in artifact_ids_include]
indicator_set = set()
# fetch all artifacts in the container
container_artifact_url = phantom.build_phantom_rest_url('artifact')
container_artifact_url += f'?_filter_container={container_id}&page_size=0&include_all_cef_types'
artifacts = phantom.requests.get(container_artifact_url, verify=False).json()['data']
for artifact in artifacts:
artifact_id = artifact['id']
if (artifact_ids_include and artifact_id in artifact_ids_include) or not artifact_ids_include:
for cef_key in artifact['cef']:
cef_value = artifact['cef'][cef_key]
data_types = artifact['cef_types'].get(cef_key, [])
# get indicator details if valid type
if (
(
is_valid_indicator(indicator_types_exclude, data_types, check_type='exclude')
and is_valid_indicator(indicator_types_include, data_types, check_type='include')
)
and
(
isinstance(cef_value, str) or isinstance(cef_value, bool) or isinstance(cef_value, int) or isinstance(cef_value, float)
)
):
indicator_set.add(str(cef_value))
indicator_dictionary = get_indicator_json(indicator_set)
for artifact in artifacts:
artifact_id = artifact['id']
if (artifact_ids_include and artifact_id in artifact_ids_include) or not artifact_ids_include:
for cef_key in artifact['cef']:
cef_value = artifact['cef'][cef_key]
cef_value_hash = sha256(str(cef_value).encode('utf-8')).hexdigest()
data_types = artifact['cef_types'].get(cef_key, [])
if indicator_dictionary.get(cef_value_hash):
tags = indicator_dictionary[cef_value_hash]['tags']
if (
is_valid_indicator(indicator_tags_exclude, tags, check_type='exclude')
and is_valid_indicator(indicator_tags_include, tags, check_type='include')
):
outputs['all_indicators'].append({
'cef_key': cef_key,
'cef_value': cef_value,
'artifact_id': artifact_id,
'data_types': data_types,
'tags': tags
})
for data_type in data_types:
# outputs will have underscores instead of spaces
data_type_escaped = data_type.replace(' ', '_')
if data_type_escaped not in outputs:
outputs[data_type_escaped] = []
outputs[data_type_escaped].append(
{'cef_key': cef_key, 'cef_value': cef_value, 'artifact_id': artifact_id, 'tags': tags}
)
if outputs.get('all_indicators'):
# sort the all_indicators outputs to make them more consistent
outputs['all_indicators'].sort(key=lambda indicator: str(indicator['cef_value']))
# Return a JSON-serializable object
assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable
return outputs | 1e7681f66231e856a9f6a264884556c44fa5b42d | 23,701 |
def remove_duplicates(iterable):
"""Removes duplicates of an iterable without meddling with the order"""
seen = set()
    seen_add = seen.add  # for efficiency: binding to a local avoids repeated attribute lookups
return [x for x in iterable if not (x in seen or seen_add(x))] | d98fdf8a4be281008fa51344610e5d052aa77cae | 23,702 |
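Two illustrative checks for `remove_duplicates` (not part of the original snippet), showing that order is preserved and that any iterable works.

```python
# Illustrative checks for remove_duplicates.
assert remove_duplicates([3, 1, 3, 2, 1]) == [3, 1, 2]
assert remove_duplicates("abca") == ["a", "b", "c"]
```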
def verify_my_token(user: User = Depends(auth_user)):
"""
Verify a token, and get basic user information
"""
return {"token": get_token(user),
"email": user.email,
"is_admin": user.is_admin,
"restricted_job": user.restricted_job} | ee628ab199c7b60ee5fd79103735f6bba51e26a0 | 23,703 |
def inv_partition_spline_curve(x):
"""The inverse of partition_spline_curve()."""
c = lambda z: tf.cast(z, x.dtype)
assert_ops = [tf.Assert(tf.reduce_all(x >= 0.), [x])]
with tf.control_dependencies(assert_ops):
alpha = tf.where(
x < 8,
c(0.5) * x + tf.where(
x <= 4,
c(1.25) - tf.sqrt(c(1.5625) - x + c(.25) * tf.square(x)),
c(-1.25) + tf.sqrt(c(9.5625) - c(3) * x + c(.25) * tf.square(x))),
c(3.75) + c(0.25) * util.exp_safe(x * c(3.6) - c(28.8)))
return alpha | 815b91cff13aea862fe1681eed33ebf6497a047b | 23,704 |
def _orbit_bbox(partitions):
""" Takes a granule's partitions 'partitions' and returns the bounding box
containing all of them. Bounding box is ll, ur format
[[lon, lat], [lon, lat]]. """
lon_min = partitions[0]['lon_min']
lat_min = partitions[0]['lat_min']
lon_max = partitions[0]['lon_max']
lat_max = partitions[0]['lat_max']
for p in partitions[1:]:
if p['lon_min'] < lon_min:
lon_min = p['lon_min']
if p['lat_min'] < lat_min:
lat_min = p['lat_min']
if p['lon_max'] > lon_max:
lon_max = p['lon_max']
if p['lat_max'] > lat_max:
lat_max = p['lat_max']
return [[lon_min, lat_min], [lon_max, lat_max]] | 8e040b549cbdf9587f08a285bd6f867ae580d584 | 23,705 |
def GetModel(name: str):
    """
    Returns the model from the model pool that corresponds
    to the given name. Raises GraphicsException
    if the requested model cannot be found.
    param name: Name of a model.
    """
if not name in _models:
raise GraphicsException(f"No such model '{name}'.")
return _models[name] | 162b7279f7491c614a72bbb9dc6bbdfd591a7c9c | 23,706 |
def db_to_dict(s_str, i = 0, d = None):
""" Converts a dotbracket string to a dictionary of indices and their pairs
Args:
s_str -- str: secondary_structure in dotbracket notation
KWargs:
i -- int: start index
d -- dict<index1, index2>: the dictionary so far
Returns:
dictionary
"""
    if d is None:  # avoid sharing a mutable default dict across calls
        d = {}
    j = i
while j < len(s_str):
c = s_str[j]
if c == "(":
d = db_to_dict(s_str, j + 1, d)
j = d[j]
elif c == ")":
d[i - 1] = j
d[j] = i - 1
if(i != 0): return d # Don't return from the first iteration yet
else:
d[j] = None
j = j + 1
return d | 5440bc318b0b5c8a137e0a3f739031603994e89c | 23,707 |
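A small worked example for `db_to_dict` (illustrative only): for the dot-bracket string "(.)" the bracket pair maps index 0 to 2 and back, and the unpaired dot maps to None.

```python
# Illustrative check for db_to_dict.
pairs = db_to_dict("(.)")
assert pairs[0] == 2 and pairs[2] == 0  # the matching bracket pair
assert pairs[1] is None                 # the unpaired position
```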
def identify_event_type(event):
"""Look at event to determine type of device.
Async friendly.
"""
if EVENT_KEY_COMMAND in event:
return EVENT_KEY_COMMAND
if EVENT_KEY_SENSOR in event:
return EVENT_KEY_SENSOR
return "unknown" | d6c504e4edd2993a407ce36eea7688010a46c2be | 23,708 |
import numpy as np
from matplotlib.pyplot import figure
def pcolormesh_nan(x: np.ndarray, y: np.ndarray, c: np.ndarray, cmap=None, axis=None):
"""handles NaN in x and y by smearing last valid value in column or row out,
which doesn't affect plot because "c" will be masked too
"""
mask = np.isfinite(x) & np.isfinite(y)
top = None
bottom = None
for i, m in enumerate(mask):
good = m.nonzero()[0]
if good.size == 0:
continue
elif top is None:
top = i
else:
bottom = i
x[i, good[-1] :] = x[i, good[-1]]
y[i, good[-1] :] = y[i, good[-1]]
x[i, : good[0]] = x[i, good[0]]
y[i, : good[0]] = y[i, good[0]]
x[:top, :] = np.nanmean(x[top, :])
y[:top, :] = np.nanmean(y[top, :])
x[bottom:, :] = np.nanmean(x[bottom, :])
y[bottom:, :] = np.nanmean(y[bottom, :])
if axis is None:
axis = figure().gca()
return axis.pcolormesh(x, y, np.ma.masked_where(~mask, c), cmap=cmap) | cfd26ee1b110099220390c6771668ba1b422278a | 23,709 |
def delete_post(post_id):
"""Delete a post
:param post_id: id of the post object
:return: redirect or 404
"""
if Post.delete_post(post_id):
logger.warning('post %d has been deleted', post_id)
return redirect(url_for('.posts'))
else:
return render_template('page_not_found.html'), 404 | 0511287930d66143ee152c5f670918b73fb34250 | 23,710 |
from typing import Callable
import functools
from typing import Any
def log_arguments(func: Callable) -> Callable:
"""
decorate a function to log its arguments and result
:param func: the function to be decorated
:return: the decorator
"""
@functools.wraps(func)
def wrapper_args(*args, **kwargs) -> Any: # type: ignore
result = func(*args, **kwargs)
log_args_kwargs_results(func, result, -1, None, *args, **kwargs)
return result
return wrapper_args | a50af7d31049c0da929f649affbd51c12aa6d810 | 23,711 |
import time
def collect_gsso_dict(gsso):
""" Export gsso as a dict: keys are cls, ind, all (ie cls+ind)"""
print('Importing gsso as dict')
t0 = time.time()
gsso_cls_dict, gsso_ind_dict = _create_gsso_dict(gsso)
gsso_all_dict = _create_gsso_dict_all(gsso)
print("Executed in %s seconds." % str(time.time()-t0))
return gsso_cls_dict, gsso_ind_dict, gsso_all_dict | cdf14ae2ea6e5fe6e445d7b95a93b0df6423901c | 23,714 |
def H_squared(omega):
"""Square magnitude of the frequency filter function."""
return 1 / (
(1 + (omega * tau_a) ** 2) * (1 + (omega * tau_r) ** 2)
) * H_squared_heaviside(omega) | 60cda08d097901f679ce0fade20b062cb409bbae | 23,715 |
def get_neighbor_distances(ntw, v0, l):
"""Get distances to the nearest vertex neighbors along
connecting arcs.
Parameters
----------
ntw : spaghetti.Network
spaghetti Network object.
v0 : int
vertex id
l : dict
key is tuple (start vertex, end vertex); value is ``float``.
Cost per arc to travel, e.g. distance.
Returns
-------
neighbors : dict
key is int (vertex id); value is ``float`` (distance)
Examples
--------
>>> import spaghetti as spgh
>>> from libpysal import examples
>>> ntw = spgh.Network(examples.get_path('streets.shp'))
>>> neighs = spgh.util.get_neighbor_distances(ntw, 0, ntw.arc_lengths)
>>> neighs[1]
102.62353453439829
"""
# fetch links associated with vertices
arcs = ntw.enum_links_vertex(v0)
# create neighbor distance lookup
neighbors = {}
# iterate over each associated link
for arc in arcs:
# set distance from vertex1 to vertex2 (link length)
if arc[0] != v0:
neighbors[arc[0]] = l[arc]
else:
neighbors[arc[1]] = l[arc]
return neighbors | a7ec81a0c258a691786557e0f66e8ae17c5bbb86 | 23,716 |
from typing import Any
from typing import List
def is_generic_list(annotation: Any):
"""Checks if ANNOTATION is List[...]."""
# python<3.7 reports List in __origin__, while python>=3.7 reports list
return getattr(annotation, '__origin__', None) in (List, list) | 0ed718eed16e07c27fd5643c18a6e63dc9e38f69 | 23,717 |
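A few illustrative checks for `is_generic_list` (not part of the original snippet): only parametrized `List[...]` annotations report a list `__origin__`.

```python
# Illustrative checks for is_generic_list.
from typing import List

assert is_generic_list(List[int])   # parametrized List -> True
assert is_generic_list(List[str])
assert not is_generic_list(int)     # non-list annotations -> False
assert not is_generic_list(list)    # bare `list` has no __origin__
```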
from pathlib import Path
def create_folder(base_path: Path, directory: str, rtn_path=False):
""" Recursive directory creation function. Like mkdir(), but makes all intermediate-level directories needed to
contain the leaf directory
Parameters
-----------
base_path : pathlib.PosixPath
Global Path to be root of the created directory(s)
directory : str
Location in the Songbird-LFP-Paper the new directory is meant to be made
rtn_path : bool, optional
If True it returns a Path() object of the path to the Directory requested to be created
Returns
--------
location_to_save : class, (Path() from pathlib)
Path() object for the Directory requested to be created
Example
--------
# Will typically input a path using the Global Paths from paths.py
    >>> create_folder(base_path, 'data/')
"""
location_to_save = base_path / directory
# Recursive directory creation function
location_to_save.mkdir(parents=True, exist_ok=True)
if rtn_path:
return location_to_save.resolve() | 7c3724b009ef03fc6aa4fbc2bf9da2cbfa4c784d | 23,718 |
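A hypothetical usage sketch for `create_folder`; the root path below is a placeholder, since the function is normally called with the project's global paths.

```python
# Hypothetical usage sketch for create_folder; the base path is a placeholder.
from pathlib import Path

base = Path("/tmp/songbird_demo")
saved_to = create_folder(base, "data/raw", rtn_path=True)
print(saved_to)  # resolved path to /tmp/songbird_demo/data/raw
```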
import numpy
def extract_track_from_cube(nemo_cube, track_cube, time_pad, dataset_id,
nn_finder=None):
"""
Extract surface track from NEMO 2d cube
"""
# crop track time
st = ga.get_cube_datetime(nemo_cube, 0)
et = ga.get_cube_datetime(nemo_cube, -1)
# NOTE do not include start instant to have non-overlapping windows
target = ga.constrain_cube_time(
track_cube, st - time_pad, et + time_pad, include_start=False
)
def find_nearest_index(src, dst, coord_name):
src_arr = src.coord(coord_name).points
dst_arr = dst.coord(coord_name).points
time_tree = cKDTree(src_arr[:, numpy.newaxis])
d, index = time_tree.query(dst_arr[:, numpy.newaxis], k=1)
return index
if nn_finder is None:
nn_finder = NearestNeighborLatLon(nemo_cube[0, ...])
target_lat = target.coord('latitude').points
target_lon = target.coord('longitude').points
i_lat, i_lon = nn_finder.search(target_lon, target_lat)
ntime = len(nemo_cube.coord('time').points)
if ntime == 1:
i_time = numpy.zeros_like(i_lat)
else:
i_time = find_nearest_index(nemo_cube, target, 'time')
values = nemo_cube.data[i_time, i_lat, i_lon]
sname = ga.nemo_reader.map_nemo_sname_to_standard[nemo_cube.standard_name]
cube = iris.cube.Cube(values, standard_name=sname, units=nemo_cube.units)
# copy coordinates
cube.add_dim_coord(target.coord('time'), 0)
cube.add_aux_coord(target.coord('latitude'), 0)
cube.add_aux_coord(target.coord('longitude'), 0)
cube.add_aux_coord(target.coord('depth'))
for coord_name in ['time', 'latitude', 'longitude', 'depth']:
cube.coord(coord_name).attributes = {} # discard coord attributes
# add attributes
cube.attributes['location_name'] = target.attributes['location_name']
cube.attributes['dataset_id'] = dataset_id
return cube | ebe226ee7fca3507cebd2d936ef2419c2ec7413a | 23,720 |
import re
import pandas as pd
def get_mean_series_temp(log_frame: pd.DataFrame):
"""Get temperature time series as mean over CPU cores."""
columns_temp = [c for c in log_frame.columns if re.fullmatch(r"Temp:Core\d+,0", c)]
values_temp = log_frame[columns_temp].mean(axis=1)
return values_temp | 2da22c316433460a8b9f9ec53a8e6542bd6da699 | 23,721 |
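A hypothetical usage sketch for `get_mean_series_temp`; the column names are made up but follow the `Temp:Core<N>,0` pattern the regex expects, and non-matching columns are ignored.

```python
# Hypothetical usage sketch for get_mean_series_temp; column names are illustrative.
import pandas as pd

log_frame = pd.DataFrame({
    "Temp:Core0,0": [40.0, 42.0],
    "Temp:Core1,0": [44.0, 46.0],
    "Freq:Core0,0": [3.2, 3.4],  # ignored: does not match the Temp pattern
})
print(get_mean_series_temp(log_frame))  # row-wise means: 42.0, 44.0
```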
def new_channel():
"""Instantiates a dict containing a template for an empty single-point
channel.
"""
return {
"channel_name": "myChannel",
"after_last": "Goto first point",
"alternate_direction": False,
"equation": "x",
"final_value": 0.0,
"optimizer_config": {
"Enabled": False,
"Initial step size": 1.0,
"Max value": 1.0,
"Min value": 0.0,
"Precision": 0.001,
"Start value": 0.5
},
"relation_parameters": [
{
"channel_name": "Step values",
"lookup": None,
"use_lookup": False,
"variable": "x"
}
],
"show_advanced": False,
"step_items": [
{
"center": 0.0,
"interp": "Linear",
"n_pts": 1,
"range_type": "Single",
"single": 1.0,
"span": 0.0,
"start": 1.0,
"step": 0.0,
"step_type": "Fixed # of pts",
"stop": 1.0,
"sweep_rate": 0.0
}
],
"step_unit": "Instrument",
"sweep_mode": "Off",
"sweep_rate_outside": 0.0,
"use_outside_sweep_rate": False,
"use_relations": False,
"wait_after": 0.0
} | af05dfda58a0e14f7448f59b057546728dbbeba7 | 23,722 |
from typing import Optional
def Log1p(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
:param input_vertex: the vertex
"""
return Vertex(context.jvm_view().Log1pVertex, label, cast_to_vertex(input_vertex)) | fddb06841e528ed7014ef75ecab3354d53e4b901 | 23,723 |
def test(request):
"""
Controller for the app home page.
"""
context = {}
return render(request, 'ueb_app/test.html', context) | 3d578e9acbcdec1467162f22d71e1c01979ed778 | 23,724 |
def get_node_backups(request, queryset):
"""
Return dict with backups attribute.
"""
user_order_by, order_by = get_order_by(request, api_view=VmBackupList,
db_default=('-id',), user_default=('-created',))
bkps = get_pager(request, queryset.order_by(*order_by), per_page=50)
return {
'order_by': user_order_by,
'pager': bkps,
'backups': bkps,
'backups_count': bkps.paginator.count,
'backups_size': queryset.exclude(size__isnull=True).aggregate(Sum('size')).get('size__sum'),
} | 5c5c92b1221037805182efeed6da38d413aa5f16 | 23,726 |
def xpath(elt, xp, ns, default=None):
"""Run an xpath on an element and return the first result. If no results
were returned then return the default value."""
res = elt.xpath(xp, namespaces=ns)
if len(res) == 0: return default
else: return res[0] | 2252a15d621d01b58c42790622ffa66022e90dac | 23,727 |
def check_response_stimFreeze_delays(data, **_):
""" Checks that the time difference between the visual stimulus freezing and the
response is positive and less than 100ms.
Metric: M = (stimFreeze_times - response_times)
Criterion: 0 < M < 0.100 s
Units: seconds [s]
:param data: dict of trial data with keys ('stimFreeze_times', 'response_times', 'intervals',
'choice')
"""
    # Calculate the difference between stimFreeze and response times.
# If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.
metric = np.nan_to_num(data["stimFreeze_times"] - data["response_times"], nan=np.inf)
# Test for valid values
passed = ((metric < 0.1) & (metric > 0)).astype(float)
# Finally remove no_go trials (stimFreeze triggered differently in no_go trials)
# These values are ignored in calculation of proportion passed
passed[data["choice"] == 0] = np.nan
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | 9abe61acd4ce085eb6e9f7b7deb06f6a6bcb8a46 | 23,729 |
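# A small synthetic check of the metric/criterion described above (illustrative
# only; the keys mirror the docstring, the values are made up).
import numpy as np

data = {
    "stimFreeze_times": np.array([1.05, 2.20, np.nan]),
    "response_times":   np.array([1.00, 2.00, 3.00]),
    "intervals":        np.zeros((3, 2)),
    "choice":           np.array([1, -1, 0]),   # last trial is a no_go
}
metric, passed = check_response_stimFreeze_delays(data)
# metric -> [0.05, 0.20, inf]; passed -> [1.0, 0.0, nan] (no_go trial ignored)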
import matplotlib as mpl
import vtool.keypoint as ktool
def get_invVR_aff2Ds(kpts, H=None):
"""
Returns matplotlib keypoint transformations (circle -> ellipse)
Example:
>>> # Test CV2 ellipse vs mine using MSER
>>> import vtool as vt
>>> import cv2
>>> import wbia.plottool as pt
>>> img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='zebra.png'))
>>> imgBGR = vt.imread(img_fpath)
>>> imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
>>> mser = cv2.MSER_create()
>>> regions, bboxs = mser.detectRegions(imgGray)
>>> region = regions[0]
>>> bbox = bboxs[0]
>>> vis = imgBGR.copy()
>>> vis[region.T[1], region.T[0], :] = 0
>>> hull = cv2.convexHull(region.reshape(-1, 1, 2))
>>> cv2.polylines(vis, [hull], 1, (0, 255, 0))
>>> ell = cv2.fitEllipse(region)
>>> cv2.ellipse(vis, ell, (255))
>>> ((cx, cy), (rx, ry), degrees) = ell
>>> # Convert diameter to radians
>>> rx /= 2
>>> ry /= 2
>>> # Make my version of ell
>>> theta = np.radians(degrees) # opencv lives in radians
>>> S = vt.scale_mat3x3(rx, ry)
>>> T = vt.translation_mat3x3(cx, cy)
>>> R = vt.rotation_mat3x3(theta)
>>> #R = np.eye(3)
>>> invVR = T.dot(R).dot(S)
>>> kpts = vt.flatten_invV_mats_to_kpts(np.array([invVR]))
>>> pt.imshow(vis)
>>> # MINE IS MUCH LARGER (by factor of 2)) WHY?
>>> # we start out with a unit circle not a half circle
>>> pt.draw_keypoints(pt.gca(), kpts, pts=True, ori=True, eig=True, rect=True)
"""
# invVR_mats = ktool.get_invV_mats(kpts, with_trans=True, with_ori=True)
invVR_mats = ktool.get_invVR_mats3x3(kpts)
if H is None:
invVR_aff2Ds = [mpl.transforms.Affine2D(invVR) for invVR in invVR_mats]
else:
invVR_aff2Ds = [HomographyTransform(H.dot(invVR)) for invVR in invVR_mats]
return invVR_aff2Ds | c32f2d3b833ebc7212dec95f0ead393847297be7 | 23,730 |
def get_string(string_name):
"""
Gets a string from the language file
"""
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return string_name | 18ed37668394e40bf70110d9dd26f2a739a6e2e3 | 23,732 |
import math
import logging
import numpy as np
def build_streambed(x_max, set_diam):
""" Build the bed particle list.
Handles calls to add_bed_particle, checks for
    completeness of bed and updates the x-extent
of stream when the packing exceeds/under packs
within 8mm range.
Note: the updates to x-extent are only required
when variable particle diameter is being used.
Return values:
bed_particles -- list of bed particles
bed_vertices -- list of available vertices
based on bed list
"""
max_particles = int(math.ceil( x_max / set_diam ))
bed_particles = np.zeros([max_particles, 7],dtype=float)
running_id = 0
running_pack_idx = 0
# This probably doesn't need to be a loop. NumPy!
while True:
running_id, running_pack_idx = add_bed_particle(set_diam,
bed_particles,
running_id,
running_pack_idx)
if bed_complete(running_pack_idx, x_max):
break
else: continue
# Bed packing does not always match x_max. Adjust if off
bed_max = int(math.ceil(bed_particles[running_id-1][1]
+ bed_particles[running_id-1][3]))
if x_max != bed_max:
msg = (
f'Bed packing could not match x_max parameter... Updating '
f'x_max to match packing extent: {bed_max}....'
)
logging.warning(msg)
x_max = bed_max
else: x_max = x_max
# strip zero element particles tuples from the original array
valid = ((bed_particles==0).all(axis=(1)))
bed_particles = bed_particles[~valid]
return bed_particles, x_max | 1a4093ebf31b2f19c1144c332addaf5dadad5eee | 23,733 |
import numpy as np
def rotate_around_point_highperf_Numpy(xy, radians, origin):
"""
Rotate a point around a given point.
I call this the "high performance" version since we're caching some
values that are needed >1 time. It's less readable than the previous
function but it's faster.
"""
adjust_xy = xy - origin
rotate_matrix_X = np.array( (np.cos(radians), np.sin(radians)) )
rotate_matrix_Y = np.array( (-np.sin(radians), np.cos(radians)) )
rotate_xy = origin + np.array( (sum(adjust_xy * rotate_matrix_X), sum(adjust_xy * rotate_matrix_Y)) )
return rotate_xy | 068651134692976e01530a986d6257a45939d741 | 23,734 |
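# Quick sanity check for the rotation helper above (illustrative only). Note the
# matrix rows as written rotate clockwise in standard x-y axes, so (1, 0) rotated
# by pi/2 around the origin lands on (0, -1) up to float error.
import numpy as np

point = np.array([1.0, 0.0])
origin = np.array([0.0, 0.0])
rotated = rotate_around_point_highperf_Numpy(point, np.pi / 2, origin)
# rotated ~= [0.0, -1.0]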
def eval(cfg, env, agent):
"""
Do the evaluation of the current agent
:param cfg: configuration of the agent
    :param env: environment to evaluate in
    :param agent: trained agent to evaluate
    :return: (rewards, ma_rewards) per-episode rewards and their moving average
    """
print("========= Start to Evaluation ===========")
print("Environment:{}, Algorithm:{}".format(cfg.env, cfg.algo))
    rewards = []
    ma_rewards = []  # moving-average of episode rewards
    for i_episode in range(cfg.eval_eps):
temp_ep_reward = 0
state = env.reset()
while True:
action = agent.predict(state)
next_state, reward, done, _ = env.step(action)
state = next_state
temp_ep_reward += reward
if done:
break
rewards.append(temp_ep_reward)
if ma_rewards:
ma_rewards.append(ma_rewards[-1]*0.9+temp_ep_reward*0.1)
else:
ma_rewards.append(temp_ep_reward)
print("Episode:{}/{} : reward:{:.1f}".format(i_episode, cfg.eval_eps, temp_ep_reward))
print("============ Evaluation Complete =================")
return rewards, ma_rewards | f0f5f2bf4eabba13fabfd782de53f8a5ef0db982 | 23,735 |
import numpy as np
from scipy.special import erfc
def phi(input):
"""Phi function.
:param input:
Float (scalar or array) value.
:returns:
phi(input).
"""
return 0.5 * erfc(-input/np.sqrt(2)) | fd9988c4257c82697a46bee71eb1e67aab286353 | 23,736 |
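# Illustrative check (not from the source): phi is the standard normal CDF, so
# phi(0) is 0.5 and phi(+/-1.96) gives the familiar 95% tail points.
import numpy as np

print(phi(0.0))                            # ~0.5
print(phi(np.array([-1.96, 0.0, 1.96])))   # ~[0.025, 0.5, 0.975]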
def _is_correct_task(task: str, db: dict) -> bool:
"""
Check if the current data set is compatible with the specified task.
Parameters
----------
task
Regression or classification
db
OpenML data set dictionary
Returns
-------
bool
True if the task and the data set are compatible
"""
if task == "classification":
return db['NumberOfSymbolicFeatures'] == 1 and db['NumberOfClasses'] > 0
elif task == "regression":
return True
else:
return False | 49790d8e2b7a16ee9b3ca9c8bc6054fde28b3b6f | 23,737 |
import re
def is_valid_semver(version: str) -> bool:
"""return True if a value is a valid semantic version
"""
match = re.match(r'^[0-9]+\.[0-9]+\.[0-9]+(-([0-9a-z]+(\.[0-9a-z]+)*))?$', version)
return match is not None | 811a29a497515d23169916b9d9450fed6364c966 | 23,738 |
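# Illustrative calls (not from the source) showing what the pattern above accepts.
assert is_valid_semver("1.2.3")
assert is_valid_semver("1.2.3-alpha.1")
assert not is_valid_semver("1.2")        # missing patch component
assert not is_valid_semver("v1.2.3")     # leading "v" is rejected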
from typing import Optional
from typing import List
async def role_assignments_for_team(
name: str, project_name: Optional[str] = None
) -> List[RoleAssignment]:
"""Gets all role assignments for a team."""
try:
return zen_store.get_role_assignments_for_team(
team_name=name, project_name=project_name
)
except KeyError as error:
raise not_found(error) from error | 3ba5336882978109e4333aead0bf8d5990a52880 | 23,739 |
def set_nested_dict_value(input_dict, key, val):
"""Uses '.' or '->'-splittable string as key and returns modified dict."""
if not isinstance(input_dict, dict):
# dangerous, just replace with dict
input_dict = {}
key = key.replace("->", ".") # make sure no -> left
split_key = key.split('.', 1)
if len(split_key) == 2:
key_prefix, key_suffix = split_key[0], split_key[1]
if key_prefix not in input_dict:
input_dict[key_prefix] = {}
input_dict[key_prefix] = set_nested_dict_value(
input_dict[key_prefix], key_suffix, val)
else: # not enough values to unpack
input_dict[key] = val
return input_dict | 2f2a160348b0c5d5fac955a8c6cec6c0ec0d5f0d | 23,740 |
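# Minimal usage sketch (not from the source) for the dotted-key setter above;
# both '.' and '->' separators are accepted.
cfg = {}
cfg = set_nested_dict_value(cfg, "metadata.project->name", "demo")
# cfg == {'metadata': {'project': {'name': 'demo'}}}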
from unittest.mock import Mock
from mathutils import Matrix
def cube_1(cube_mesh):
""" Viewable cube object shifted to 3 on x """
obj = Mock()
obj.name = 'cube_1'
obj.mode = 'OBJECT'
obj.mesh_mock = cube_mesh
obj.to_mesh.return_value = cube_mesh
obj.matrix_world = Matrix.Identity(4)
obj.mesh_mock.vertices = cube_vertices(3)
obj.update_from_editmode = Mock()
obj.evaluated_get = lambda s: s
obj.visible_get.return_value = False
obj.hide_viewport = False
obj.hide_render = True
obj.children = None
return obj | 7d60199dcf41a818346e91014b4f041ab14313da | 23,741 |
def deserialize_model_fixture():
"""
Returns a deserialized version of an instance of
the Model class. This simulates the idea that a
model instance would be serialized and loaded
from disk.
"""
class Model:
def predict(self, values):
return [1]
return Model() | 946e0cc67e4cb14da9b08e6790d336126bb9e43a | 23,742 |
def _get_bfp_op(op, name, bfp_args):
"""
Create the bfp version of the operation op
This function is called when a bfp layer is defined. See BFPConv2d and BFPLinear below
"""
op_name = _get_op_name(name, **bfp_args)
if op_name not in _bfp_ops:
        _bfp_ops[op_name] = _gen_bfp_op(op, name, bfp_args)
    return _bfp_ops[op_name]
from typing import Sequence
import numpy as np
def compute_dmdt(jd: Sequence, mag: Sequence, dmdt_ints_v: str = "v20200318"):
"""Compute dmdt matrix for time series (jd, mag)
See arXiv:1709.06257
    :param jd: Julian dates of the time series
    :param mag: magnitudes of the time series
    :param dmdt_ints_v: version key of the dm/dt interval grid
    :return: normalized 2D dmdt histogram
"""
jd_diff = pwd_for(jd)
mag_diff = pwd_for(mag)
dmdt, ex, ey = np.histogram2d(
jd_diff,
mag_diff,
bins=[
DMDT_INTERVALS[dmdt_ints_v]["dt_intervals"],
DMDT_INTERVALS[dmdt_ints_v]["dm_intervals"],
],
)
dmdt = np.transpose(dmdt)
norm = np.linalg.norm(dmdt)
if norm != 0.0:
dmdt /= np.linalg.norm(dmdt)
else:
dmdt = np.zeros_like(dmdt)
return dmdt | af6f7c59de8ec7b38f22f3ffa5e3d17641b9ed32 | 23,744 |
import numpy as np
def all_bin_vecs(arr, v):
"""
create an array which holds all 2^V binary vectors
INPUT
    arr integers from 0 to 2^V - 1 as a column vector, (2^V, 1) numpy array
v number of variables V
OUTPUT
edgeconfs all possible binary vectors, (2^V, V) numpy array
"""
to_str_func = np.vectorize(lambda x: np.binary_repr(x).zfill(v))
strs = to_str_func(arr)
edgeconfs = np.zeros((arr.shape[0], v), dtype=np.int8)
for bit_ix in range(0, v):
fetch_bit_func = np.vectorize(lambda x: x[bit_ix] == '1')
edgeconfs[:,bit_ix] = fetch_bit_func(strs)[:,0]
return edgeconfs | 1844545f85a1404a0c2bcb094e28e993e369f6df | 23,745 |
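# Illustrative call (not from the source): enumerate all binary vectors for V=3.
# The integers 0..2**v-1 are passed as a column vector so every bit pattern from
# [0,0,0] to [1,1,1] appears exactly once.
import numpy as np

v = 3
arr = np.arange(2 ** v).reshape(-1, 1)
confs = all_bin_vecs(arr, v)
# confs.shape == (8, 3)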
def unpack_domains(df):
"""Unpack domain codes to values.
Parameters
----------
df : DataFrame
"""
df = df.copy()
for field, domain in DOMAINS.items():
if field in df.columns:
df[field] = df[field].map(domain)
return df | 9c6c9607439aa24e944d9a8055e741ae3454d0cb | 23,746 |
def generate_region_info(region_params):
"""Generate the `region_params` list in the tiling parameter dict
Args:
region_params (dict):
A `dict` mapping each region-specific parameter to a list of values per FOV
Returns:
list:
The complete set of `region_params` sorted by region
"""
# define the region params list
region_params_list = []
# iterate over all the region parameters, all parameter lists are the same length
for i in range(len(region_params['region_start_row'])):
# define a dict containing all the region info for the specific FOV
region_info = {
rp: region_params[rp][i] for rp in region_params
}
# append info to region_params
region_params_list.append(region_info)
return region_params_list | aa80e1e4ea9693b362fa18a435d886a09ecff533 | 23,748 |
def is_decorator(tree, fname):
"""Test tree whether it is the decorator ``fname``.
``fname`` may be ``str`` or a predicate, see ``isx``.
References of the forms ``f``, ``foo.f`` and ``hq[f]`` are supported.
We detect:
- ``Name``, ``Attribute`` or ``Captured`` matching the given ``fname``
(non-parametric decorator), and
- ``Call`` whose ``.func`` matches the above rule (parametric decorator).
"""
return isx(tree, fname) or \
(type(tree) is Call and isx(tree.func, fname)) | f4fdd760aefae9c1be3d40cc249b242e0be65db5 | 23,749 |
import numpy as np
def Nbspld1(t, x, k=3):
"""Same as :func:`Nbspl`, but returns the first derivative too."""
kmax = k
if kmax > len(t)-2:
raise Exception("Input error in Nbspl: require that k < len(t)-2")
t = np.array(t)
x = np.array(x)[:, np.newaxis]
N = 1.0*((x > t[:-1]) & (x <= t[1:]))
dN = np.zeros_like(N)
    for k in range(1, kmax+1):
dt = t[k:] - t[:-k]
_dt = dt.copy()
_dt[dt != 0] = 1./dt[dt != 0]
dN = dN[:,:-1]*(x-t[:-k-1])*_dt[:-1] - dN[:,1:]*(x-t[k+1:])*_dt[1:]
dN += N[:,:-1]*_dt[:-1] - N[:,1:]*_dt[1:]
N = N[:,:-1]*(x-t[:-k-1])*_dt[:-1] - N[:,1:]*(x-t[k+1:])*_dt[1:]
return N, dN | f2535888715ec28c2b089c7f92b692b14c26bea7 | 23,750 |
from reportlab.lib.styles import StyleSheet1, ParagraphStyle
def getStyleSheet():
"""Returns a stylesheet object"""
stylesheet = StyleSheet1()
stylesheet.add(ParagraphStyle(name='Normal',
fontName="Helvetica",
fontSize=10,
leading=12))
stylesheet.add(ParagraphStyle(name='BodyText',
parent=stylesheet['Normal'],
spaceBefore=14))
stylesheet.add(ParagraphStyle(name='Bold',
parent=stylesheet['BodyText'],
fontName="Helvetica-Bold"))
return stylesheet | fcdb8cc7792254c4c7fb6a55333ad037c914b647 | 23,751 |
def parse_faq_entries(entries):
"""
Iterate through the condensed FAQ entries to expand all of the keywords and answers
"""
parsed_entries = {}
for entry in entries:
for keyword in entry["keywords"]:
if keyword not in parsed_entries:
parsed_entries[keyword] = entry["answer"]
else:
print("Error: Found duplicate keyword '{}' in pre-configured FAQ entries.".format(keyword))
exit(1)
return parsed_entries | 5258802d9384502f8a00692080cc9ae6ae7e9591 | 23,752 |
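# Illustrative input (not from the source) showing the expected entry shape:
# each entry maps a list of keywords to a single answer string.
entries = [
    {"keywords": ["hours", "opening"], "answer": "We are open 9-5 on weekdays."},
    {"keywords": ["parking"], "answer": "Free parking is available on site."},
]
faq = parse_faq_entries(entries)
# faq == {"hours": "...", "opening": "...", "parking": "..."}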
import datetime
def dh_to_dt(day_str, dh):
"""decimal hour to unix timestamp"""
# return dt.replace(tzinfo=datetime.timezone.utc).timestamp()
t0 = datetime.datetime.strptime(day_str, '%Y%m%d') - datetime.datetime(1970, 1, 1)
return datetime.datetime.strptime(day_str, '%Y%m%d') + datetime.timedelta(seconds=float(dh*3600)) | f87ec634f49400c178b6cad84f50426f67342868 | 23,753 |
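# Illustrative call (not from the source): 1.5 decimal hours into 2020-01-01.
print(dh_to_dt("20200101", 1.5))
# -> 2020-01-01 01:30:00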
from typing import Sequence
from typing import Union
from pathlib import Path
def run(cmd: Sequence[Union[str, Path]], check=True) -> int:
"""Run arbitrary command as subprocess"""
returncode = run_subprocess(
cmd, capture_stdout=False, capture_stderr=False
).returncode
if check and returncode:
cmd_str = " ".join(str(c) for c in cmd)
raise PipxError(f"{cmd_str!r} failed")
return returncode | 985eea94264b72db88ae23ebcfdb2d7413390488 | 23,755 |
def getDict(fname):
"""Returns the dict of values of the UserComment"""
s = getEXIF(fname, COMMENT_TAG)
try:
s = s.value
except Exception: pass
return getDictFromString(s) | 9601103a03a97964b2b29379ce21e6710de6a376 | 23,756 |
from hetmatpy.degree_weight import default_dwwc_method
import hetmatpy.hetmat
import inspect
import functools
import time
def path_count_cache(metric):
"""
Decorator to apply caching to the DWWC and DWPC functions from
hetmatpy.degree_weight.
"""
def decorator(user_function):
signature = inspect.signature(user_function)
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
bound_args = signature.bind(*args, **kwargs)
bound_args.apply_defaults()
arguments = bound_args.arguments
graph = arguments["graph"]
metapath = graph.metagraph.get_metapath(arguments["metapath"])
arguments["metapath"] = metapath
damping = arguments["damping"]
cached_result = None
start = time.perf_counter()
supports_cache = (
isinstance(graph, hetmatpy.hetmat.HetMat) and graph.path_counts_cache
)
if supports_cache:
cache_key = {"metapath": metapath, "metric": metric, "damping": damping}
cached_result = graph.path_counts_cache.get(**cache_key)
if cached_result:
row_names, col_names, matrix = cached_result
matrix = sparsify_or_densify(matrix, arguments["dense_threshold"])
matrix = matrix.astype(arguments["dtype"])
if cached_result is None:
if arguments["dwwc_method"] is None:
# import default_dwwc_method here to avoid circular dependencies
arguments["dwwc_method"] = default_dwwc_method
row_names, col_names, matrix = user_function(**arguments)
if supports_cache:
runtime = time.perf_counter() - start
graph.path_counts_cache.set(**cache_key, matrix=matrix, runtime=runtime)
return row_names, col_names, matrix
return wrapper
return decorator | 0872b15d52fef0289a72d87632c95a676291dffb | 23,757 |
from typing import Mapping
from typing import Set
from collections import defaultdict
from tqdm import tqdm
def get_metabolite_mapping() -> Mapping[str, Set[Reference]]:
"""Make the metabolite mapping."""
metabolites_df = get_metabolite_df()
smpdb_id_to_metabolites = defaultdict(set)
for pathway_id, metabolite_id, metabolite_name in tqdm(metabolites_df.values, desc='mapping metabolites'):
smpdb_id_to_metabolites[pathway_id].add(Reference(
prefix=PREFIX, identifier=metabolite_id, name=metabolite_name,
))
return smpdb_id_to_metabolites | ceca1f2bfc993249d9424abec0c5e67b1d456af4 | 23,758 |
def has_merge_conflict(commit: str, target_branch: str, remote: str = 'origin') -> bool:
""" Returns true if the given commit hash has a merge conflict with the given target branch.
"""
try:
# Always remove the temporary worktree. It's possible that we got
# interrupted and left it around. This will raise an exception if the
# worktree doesn't exist, which can be safely ignored.
git('worktree', 'remove', '--force', '.git/temp-worktree',
stdout=get_dev_null(), stderr=get_dev_null())
except GitError:
pass
git('worktree', 'add', '.git/temp-worktree', f'{remote}/{target_branch}', '--detach',
stdout=get_dev_null(), stderr=get_dev_null())
try:
git('merge', '--no-commit', commit,
git_dir='.git/temp-worktree', stdout=get_dev_null(), stderr=get_dev_null())
return False
except GitError:
return True
finally:
git('worktree', 'remove', '--force', '.git/temp-worktree',
stdout=get_dev_null(), stderr=get_dev_null()) | 2136f1b60201bd33c3e854ed4df372e0196ea62f | 23,759 |
from cryptography import x509
from cryptography.hazmat.backends import default_backend
def load_csr(data):
"""
Loads a PEM X.509 CSR.
"""
return x509.load_pem_x509_csr(data, default_backend()) | edf07190243d7990d2782df240044572243f770b | 23,761 |
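# Minimal usage sketch (not from the source); "request.pem" is a made-up path to
# a PEM-encoded certificate signing request.
with open("request.pem", "rb") as fh:
    csr = load_csr(fh.read())
print(csr.subject)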
import numpy as np
def parseIMACS(hdul):
"""
Parses information from a given HDU, for data produced at IMACS
"""
start = hdul[0].header['CRVAL1']
step = hdul[0].header['CDELT1']
total = hdul[0].header['NAXIS1']
corr = (hdul[0].header['CRPIX1'] - 1) * step
wave = np.arange(start - corr, start + total*step - corr, step)
wave = np.reshape(wave, (1, wave.shape[0]))
flux = np.reshape(hdul[0].data, (1, hdul[0].data.shape[0]))
error = flux * .1
return (wave, flux, error) | 35d45a5842977d71375eaa9d07df6051d45ed075 | 23,762 |
import numpy as np
def boll_cross_func_jit(data: np.ndarray,) -> np.ndarray:
"""
    Bollinger Band / candlestick golden-cross and death-cross state analysis, Numba JIT optimized
idx: 0 == open
1 == high
2 == low
3 == close
"""
BBANDS = TA_BBANDS(data[:,3], timeperiod=20, nbdevup=2)
return ret_boll_cross | 8fc68429f5ea94e462327fa57926742161d49911 | 23,763 |
import cupy as cp
import numpy as np
import cudf
from cuml.linear_model import LogisticRegression
def rank_genes_groups(
X,
labels, # louvain results
var_names,
groups=None,
reference='rest',
n_genes=100,
**kwds,
):
"""
Rank genes for characterizing groups.
Parameters
----------
X : cupy.ndarray of shape (n_cells, n_genes)
The cellxgene matrix to rank genes
labels : cudf.Series of size (n_cells,)
Observations groupings to consider
var_names : cudf.Series of size (n_genes,)
Names of genes in X
groups : Iterable[str] (default: 'all')
Subset of groups, e.g. ['g1', 'g2', 'g3'], to which comparison
shall be restricted, or 'all' (default), for all groups.
reference : str (default: 'rest')
If 'rest', compare each group to the union of the rest of the group.
If a group identifier, compare with respect to this group.
n_genes : int (default: 100)
The number of genes that appear in the returned tables.
"""
#### Wherever we see "adata.obs[groupby], we should just replace w/ the groups"
# for clarity, rename variable
if groups == 'all':
groups_order = 'all'
elif isinstance(groups, (str, int)):
raise ValueError('Specify a sequence of groups')
else:
groups_order = list(groups)
if isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if reference != 'rest' and reference not in set(groups_order):
groups_order += [reference]
if (
reference != 'rest'
and reference not in set(labels.cat.categories)
):
cats = labels.cat.categories.tolist()
raise ValueError(
f'reference = {reference} needs to be one of groupby = {cats}.'
)
groups_order, groups_masks = select_groups(labels, groups_order)
original_reference = reference
n_vars = len(var_names)
# for clarity, rename variable
n_genes_user = n_genes
# make sure indices are not OoB in case there are less genes than n_genes
if n_genes_user > X.shape[1]:
n_genes_user = X.shape[1]
# in the following, n_genes is simply another name for the total number of genes
n_genes = X.shape[1]
n_groups = groups_masks.shape[0]
ns = cp.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
ns[imask] = cp.where(mask)[0].size
if reference != 'rest':
ireference = cp.where(groups_order == reference)[0][0]
reference_indices = cp.arange(n_vars, dtype=int)
rankings_gene_scores = []
rankings_gene_names = []
# Perform LogReg
# if reference is not set, then the groups listed will be compared to the rest
# if reference is set, then the groups listed will be compared only to the other groups listed
reference = groups_order[0]
if len(groups) == 1:
raise Exception('Cannot perform logistic regression on a single cluster.')
grouping_mask = labels.astype('int').isin(cudf.Series(groups_order).astype('int'))
grouping = labels.loc[grouping_mask]
X = X[grouping_mask.values, :] # Indexing with a series causes issues, possibly segfault
y = labels.loc[grouping]
clf = LogisticRegression(**kwds)
clf.fit(X.get(), grouping.to_array().astype('float32'))
scores_all = cp.array(clf.coef_).T
for igroup, group in enumerate(groups_order):
if len(groups_order) <= 2: # binary logistic regression
scores = scores_all[0]
else:
scores = scores_all[igroup]
partition = cp.argpartition(scores, -n_genes_user)[-n_genes_user:]
partial_indices = cp.argsort(scores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_scores.append(scores[global_indices].get()) ## Shouldn't need to take this off device
rankings_gene_names.append(var_names[global_indices].to_pandas())
if len(groups_order) <= 2:
break
groups_order_save = [str(g) for g in groups_order]
if (len(groups) == 2):
groups_order_save = [g for g in groups_order if g != reference]
scores = np.rec.fromarrays(
[n for n in rankings_gene_scores],
dtype=[(rn, 'float32') for rn in groups_order_save],
)
names = np.rec.fromarrays(
[n for n in rankings_gene_names],
dtype=[(rn, 'U50') for rn in groups_order_save],
)
return scores, names, original_reference | bd2230d2be098677f62a46becd766edcc1fea36f | 23,764 |
def init_graph_handler():
"""Init GraphHandler."""
graph = get_graph_proto()
graph_handler = GraphHandler()
graph_handler.put({graph.name: graph})
return graph_handler | 66b7f9d0b30c435fc3e6fe1152b24d663c31ac6e | 23,765 |
def add_average_column(df, *, copy: bool = False):
"""Add a column averaging the power on all channels.
Parameters
----------
%(df_psd)s
An 'avg' column is added averaging the power on all channels.
%(copy)s
Returns
-------
%(df_psd)s
The average power across channels has been added in the column 'avg'.
"""
_check_type(copy, (bool,), item_name="copy")
df = df.copy() if copy else df
ch_names = [
col
for col in df.columns
if col not in ("participant", "session", "run", "phase", "idx")
]
df["avg"] = df[ch_names].mean(axis=1)
return df | 0ff995d660ba71bd42ea7ae886b79631e3bd4509 | 23,766 |
import fnmatch
def _is_globbed(name, glob):
"""
Return true if given name matches the glob list.
"""
if not glob:
return True
return any((fnmatch.fnmatchcase(name, i) for i in glob)) | 305116367884c8acc9c6f52a73c2cb116abaadbe | 23,767 |
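# Illustrative calls (not from the source) for the glob filter above.
assert _is_globbed("eth0", None)                 # empty glob matches everything
assert _is_globbed("eth0", ["eth*", "wlan*"])
assert not _is_globbed("lo", ["eth*", "wlan*"])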
import struct
import numpy as np
def read_vec_flt(file_or_fd):
"""[flt-vec] = read_vec_flt(file_or_fd)
Read kaldi float vector, ascii or binary input,
Parameters
----------
file_or_fd : obj
An ark, gzipped ark, pipe or opened file descriptor.
Raises
------
ValueError
Unsupported data-type of the input file.
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2)
if binary == b"\0B": # binary flag
# Data type,
type = fd.read(3)
if type == b"FV ":
sample_size = 4 # floats
if type == b"DV ":
sample_size = 8 # doubles
assert sample_size > 0
# Dimension,
assert fd.read(1) == b"\4" # int-size
vec_size = struct.unpack("<i", fd.read(4))[0] # vector dim
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4:
ans = np.frombuffer(buf, dtype="float32")
elif sample_size == 8:
ans = np.frombuffer(buf, dtype="float64")
else:
raise ValueError("BadSampleSize")
return ans
else: # ascii,
arr = (binary + fd.readline()).strip().split()
try:
arr.remove("[")
arr.remove("]") # optionally
except ValueError:
pass
ans = np.array(arr, dtype=float)
if fd is not file_or_fd:
fd.close() # cleanup
return ans | f12218f029e18a91666b99e9994ba29d67d62d5a | 23,768 |
def arg_export(name):
"""Export an argument set."""
def _wrapper(func):
_ARG_EXPORTS[name] = func
if 'arg_defs' not in dir(func):
func.arg_defs = []
return func
return _wrapper | a713b22a7fffda50f8a9581362d8fd5ca807cef3 | 23,769 |
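# Minimal registry sketch (not from the source); assumes _ARG_EXPORTS is the
# module-level dict used by the decorator above (defined here only so the
# snippet is self-contained).
_ARG_EXPORTS = {}

@arg_export("train")
def train_args(parser):
    parser.add_argument("--epochs", type=int, default=10)

# _ARG_EXPORTS["train"] is train_args, and train_args.arg_defs == []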
from typing import OrderedDict
def get_od_base( mode = "H+S & B3LYP+TPSS0"): # od is OrderedDict()
"""
initial parameters are prepared.
mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with speration of H and S
"H+S & B3LYP" --> ["B3LYP"] with speration of H and S
"H+S & TPSSO" --> ["TPSS0"] with speration of H and S
"""
if mode == "H+S&B3LYP+TPSS0":
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
od['H + S'] = [True]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + mode + ".csv"
else:
raise ValueError("Not supported: {}".format( mode))
return od, aod | 6a7aa100d8d244d9a0606a08188153e95a0df44b | 23,770 |