content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M)
---|---|---|
def get_public_suffix (domain):
""" get_public_suffix("www.example.com") -> "example.com"
Calling this function with a DNS name will return the
public suffix for that name.
Note that if the input does not contain a valid TLD,
e.g. "xxx.residential.fw" in which "fw" is not a valid TLD,
the returned public suffix will be "fw", and TLD will be empty
Note that for internationalized domains the list at
http://publicsuffix.org uses decoded names, so it is
up to the caller to decode any Punycode-encoded names.
"""
global Root, Domain_to_t2ld_cache
try:
return Domain_to_t2ld_cache [domain]
except KeyError:
parts = domain.lower().lstrip('.').split('.')
hits = [None] * len(parts)
_lookup_node (hits, 1, Root, parts)
for i, what in enumerate(hits):
if what is not None and what == 0:
t2ld = '.'.join(parts[i:])
Domain_to_t2ld_cache [domain] = t2ld
return t2ld
|
8982df4677f1a1853fa328973cfc00c17796e3d8
| 18,800 |
import numpy as np
from scipy.interpolate import interp1d

def interpol(data, x):
    """
    Resamples data by the given factor x using interpolation.
    """
x0 = np.linspace(0, len(data)-1, len(data))
x1 = np.linspace(0, len(data)-1, len(data)*x-(x-1))
f = interp1d(x0, data)
return f(x1)
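# Illustrative usage (added example): upsampling a 3-sample array by a factor of 2
# yields len(data)*x - (x-1) = 5 samples with interpolated midpoints.
upsampled = interpol(np.array([0.0, 1.0, 2.0]), 2)
print(upsampled)  # [0.  0.5 1.  1.5 2. ]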
|
85cb9c9d776abc8317edbf1df5935e78ac774c02
| 18,801 |
import torch
def convert_to_torch_tensors(X_train, y_train, X_test, y_test):
""" Function to quickly convert datasets to pytorch tensors """
# convert training data
_X_train = torch.LongTensor(X_train)
_y_train = torch.FloatTensor(y_train)
# convert test data
_X_test = torch.LongTensor(X_test)
_y_test = torch.FloatTensor(y_test)
# return the tensors
return _X_train, _y_train, _X_test, _y_test
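# Illustrative usage (added example): the features become int64 tensors and the
# targets float32 tensors, mirroring the casts above.
import numpy as np
Xtr, ytr, Xte, yte = convert_to_torch_tensors(
    np.array([[1, 2], [3, 4]]), np.array([0.0, 1.0]),
    np.array([[5, 6]]), np.array([1.0]))
print(Xtr.dtype, ytr.dtype)  # torch.int64 torch.float32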
|
0d40fe19c977b25e3a2571adc98790d7058a77d9
| 18,802 |
from functools import wraps

from django.http import Http404  # assumed: the request/Http404 pattern suggests a Django decorator


def api_auth(func):
"""
If the user is not logged in, this decorator looks for basic HTTP auth
data in the request header.
"""
@wraps(func)
def _decorator(request, *args, **kwargs):
authentication = APIAuthentication(request)
if authentication.authenticate():
return func(request, *args, **kwargs)
raise Http404
return _decorator
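# Illustrative usage (added example; assumes a Django project, and the view below
# is hypothetical, shown only to demonstrate applying the decorator):
from django.http import JsonResponse

@api_auth
def my_api_view(request):
    """Hypothetical API view protected by the basic-auth fallback."""
    return JsonResponse({"ok": True})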
|
624c997dae9da9b698b1dccbd5293027d54d0fc8
| 18,803 |
def object_miou(y_true, y_pred, num_classes=cfg.num_classes):
    """
    Measures the IoU of the objects in the image.
    :param y_true: ground-truth labels
    :param y_pred: predictions
    :param num_classes: number of classes
    :return: miou
    """
    confusion_matrix = get_confusion_matrix(y_true, y_pred, num_classes)
    # Intersection = TP    Union = TP + FP + FN
    # IoU = TP / (TP + FP + FN)
    # The diagonal entries can be treated as TP, i.e. the per-class intersection.
    intersection = tf.linalg.diag_part(confusion_matrix)
    # axis=1 sums the rows of the confusion matrix and axis=0 sums the columns;
    # both return 1-D tensors that are added together.
    union = tf.reduce_sum(confusion_matrix, axis=1) + tf.reduce_sum(confusion_matrix, axis=0) - intersection
    iou = intersection / union  # per-class IoU
    # avoid NaN
    iou = tf.where(tf.math.is_nan(iou), tf.zeros_like(iou), iou)
    # exclude the background class from the mean
    miou = tf.reduce_mean(iou[1:])
    return miou
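# Illustrative check of the IoU arithmetic above (added example; the
# get_confusion_matrix helper is not shown here, so a hand-made 3-class
# confusion matrix stands in for its output):
import numpy as np
cm = np.array([[50.0, 2.0, 3.0],
               [4.0, 30.0, 1.0],
               [2.0, 5.0, 40.0]])
inter = np.diag(cm)
union = cm.sum(axis=1) + cm.sum(axis=0) - inter
print((inter / union)[1:].mean())  # mean IoU over the non-background classes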
|
1051fe488fb4b16760bb256265d11b33aa613743
| 18,804 |
import datetime
def post_discussion(title: str, content: str, path: str, top: bool, private: bool = False):
"""
发送讨论
参数:
title:str 讨论题目
content:str 内容
path:str 路径
top:bool 是否置顶
返回
{
"code":-1,//是否成功执行
"discussion_id":"成功执行时的讨论ID",
"message":"错误信息"
}
"""
if not session.get("uid"):
return make_response(-1, message="请登录")
user: User = User.by_id(int(session.get("uid")))
if not permission_manager.has_permission(user.id, "discussion.manage") and top:
return make_response(-1, message="只有管理员才能发置顶讨论")
if not can_post_at(user, path):
return make_response(-1, message="你无权在这里发帖")
if not title:
return make_response(-1, message="标题不得为空")
discussion = Discussion()
discussion.content = content
discussion.title = title
discussion.path = path
discussion.time = datetime.datetime.now()
discussion.top = top
discussion.uid = user.id
discussion.private = private
db.session.add(discussion)
db.session.commit()
return make_response(0, discussion_id=discussion.id)
|
b633da456a8ca05d592efb3e1dd16c8a7a465e23
| 18,805 |
import glob
import json
def logs_handler(request):
"""Return the log file on disk.
    :param request: a web request object.
:type request: request | None
"""
log.info("Request for logs endpoint made.")
complete_log_path = 'genconf/state/complete.log'
json_files = glob.glob('genconf/state/*.json')
complete_log = []
for f in json_files:
log.debug('Adding {} to complete log file.'.format(f))
with open(f) as blob:
complete_log.append(json.loads(blob.read()))
with open(complete_log_path, 'w') as f:
f.write(json.dumps(complete_log, indent=4, sort_keys=True))
    return web.HTTPFound('/download/log/complete.log')
|
d422dbc4394a31a4b92f531cc01f377020069ddd
| 18,806 |
from operator import and_
def _get_fields_usage_data(session):
"""
Obtaining metrics of field usage in lingvodoc,
the metrics are quantity of all/deleted dictionary perspectives using this field
(also with URLs) and quantity of lexical entries in such dictionary perspectives
Result:
dict {
(client_id, object_id): dict {
'URLs': list['url_string', ...],
'metrics': dict {
'dp': dict {
'sum': quantity of all parent dictionary perspectives,
'deleted': quantity of deleted parent dictionary perspectives
},
'le': dict {
'sum': quantity of lexical entries of all parent dictionary perspectives,
'deleted': quantity of lexical entries of deleted parent dictionary perspectives
}
}
}
}
"""
f_client_id = Field.client_id.label('field_client_id')
f_object_id = Field.object_id.label('field_object_id')
dp_client_id = DictionaryPerspective.client_id.label('dictionary_perspective_client_id')
dp_object_id = DictionaryPerspective.object_id.label('dictionary_perspective_object_id')
dp_marked_for_deletion = \
DictionaryPerspective.marked_for_deletion.label('dictionary_perspective_marked_for_deletion')
subquery = session.query(f_client_id, f_object_id, dp_client_id, dp_object_id, dp_marked_for_deletion)
subquery = subquery.select_from(Field).join(DictionaryPerspectiveToField,
and_(DictionaryPerspectiveToField.field_client_id == Field.client_id,
DictionaryPerspectiveToField.field_object_id == Field.object_id))
subquery = subquery.filter(DictionaryPerspective.marked_for_deletion == False,
Field.marked_for_deletion == False)
subquery = subquery.join(DictionaryPerspective,
and_(DictionaryPerspectiveToField.parent_client_id == DictionaryPerspective.client_id,
DictionaryPerspectiveToField.parent_object_id == DictionaryPerspective.object_id))
subquery = subquery.distinct(Field.client_id, Field.object_id,
DictionaryPerspective.client_id, DictionaryPerspective.object_id)
subquery = subquery.order_by(Field.client_id, Field.object_id,
DictionaryPerspective.client_id, DictionaryPerspective.object_id)
log.info(subquery)
fields_usage = dict()
try:
for data in subquery.all():
field_id = (data.field_client_id, data.field_object_id)
if not fields_usage.get(field_id, None):
fields_usage[field_id] = {
'URLs': list(),
'metrics': {
'dp': {
'sum': 0,
'deleted': 0
},
'le': {
'sum': 0,
'deleted': 0
}
}
}
fields_usage[field_id]['URLs'].append(
_dictionary_perspective_url(
data.dictionary_perspective_client_id, data.dictionary_perspective_object_id
)
)
except exc.SQLAlchemyError as ex:
log.warning('Failed to obtain fields usage URLs at ' + __name__)
log.warning(ex)
raise
subquery = subquery.subquery('subquery')
query = session.query('subquery.field_client_id', 'subquery.field_object_id',
func.count('*'), 'subquery.dictionary_perspective_marked_for_deletion')
query = query.select_from(subquery).group_by('subquery.field_client_id',
'subquery.field_object_id',
'subquery.dictionary_perspective_marked_for_deletion')
query = query.order_by('subquery.field_client_id', 'subquery.field_object_id')
log.info(query)
try:
for data in query.all():
usage = fields_usage.get((data[0], data[1]), None)
if usage:
if data[3]:
usage['metrics']['dp']['deleted'] += data[2]
usage['metrics']['dp']['sum'] += data[2]
except exc.SQLAlchemyError as ex:
log.warning('Failed to obtain fields dictionary perspective metrics at ' + __name__)
log.warning(ex)
raise
query = session.query('subquery.field_client_id', 'subquery.field_object_id',
func.count('*'), 'subquery.dictionary_perspective_marked_for_deletion')
query = query.select_from(LexicalEntry)
query = query.join(subquery, and_('subquery.dictionary_perspective_client_id = lexicalentry.parent_client_id',
'subquery.dictionary_perspective_object_id = lexicalentry.parent_object_id'))
query = query.filter('lexicalentry.marked_for_deletion = false')
query = query.group_by('subquery.field_client_id', 'subquery.field_object_id',
'subquery.dictionary_perspective_marked_for_deletion')
log.info(query)
try:
for data in query.all():
usage = fields_usage.get((data[0], data[1]), None)
if usage:
if data[3]:
usage['metrics']['le']['deleted'] += data[2]
usage['metrics']['le']['sum'] += data[2]
except exc.SQLAlchemyError as ex:
log.warning('Failed to obtain fields lexical entry metrics at ' + __name__)
log.warning(ex)
raise
return fields_usage
|
d57fb6fd0c07e22ac62ba63e5ba5b72189481aed
| 18,807 |
import os
import errno
import fnmatch
def output_is_new(output):
"""Check if the output file is up to date.
Returns:
True if the given output file exists and is newer than any of
*_defconfig, MAINTAINERS and Kconfig*. False otherwise.
"""
try:
ctime = os.path.getctime(output)
except OSError as exception:
if exception.errno == errno.ENOENT:
# return False on 'No such file or directory' error
return False
else:
raise
for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
for filename in fnmatch.filter(filenames, '*_defconfig'):
if fnmatch.fnmatch(filename, '.*'):
continue
filepath = os.path.join(dirpath, filename)
if ctime < os.path.getctime(filepath):
return False
for (dirpath, dirnames, filenames) in os.walk('.'):
for filename in filenames:
if (fnmatch.fnmatch(filename, '*~') or
not fnmatch.fnmatch(filename, 'Kconfig*') and
not filename == 'MAINTAINERS'):
continue
filepath = os.path.join(dirpath, filename)
if ctime < os.path.getctime(filepath):
return False
# Detect a board that has been removed since the current board database
# was generated
with open(output, encoding="utf-8") as f:
for line in f:
if line[0] == '#' or line == '\n':
continue
defconfig = line.split()[6] + '_defconfig'
if not os.path.exists(os.path.join(CONFIG_DIR, defconfig)):
return False
return True
|
a6d251799ee82fc89c0fdd0c231b1253215a2ae0
| 18,808 |
def test_merge_batch_grad_transforms_same_key_same_trafo():
"""Test merging multiple ``BatchGradTransforms`` with same key and same trafo."""
def func(t):
return t
bgt1 = BatchGradTransformsHook({"x": func})
bgt2 = BatchGradTransformsHook({"x": func})
merged = Cockpit._merge_batch_grad_transform_hooks([bgt1, bgt2])
assert len(merged._transforms.keys()) == 1
assert id(merged._transforms["x"]) == id(func)
|
10aade423092d39e6a7d754c0ceecfdb53226b53
| 18,809 |
import atexit
import time
def main(selected_ssids, sample_interval, no_header, args=None):
"""
Repeatedly check internet connection status (connected or disconnected) for given WiFi SSIDs.
    Output is written as .csv to stdout.
"""
wireless_connections = [
c for c in NetworkManager.Settings.Connections
if '802-11-wireless' in c.GetSettings().keys()
]
known_ssids = [
c.GetSettings()['802-11-wireless']['ssid']
for c in wireless_connections
]
# confirm selected ssids are available as network manager connections
for ssid in selected_ssids:
assert ssid in known_ssids, f"SSID '{ssid}' not found in network manager connections. Available SSIDs: {sorted(known_ssids)}"
# get the network manager connection objects for the selected ssids
connections = {
ssid: connection
for connection in wireless_connections for ssid in selected_ssids
if connection.GetSettings()['802-11-wireless']['ssid'] == ssid
}
# get the wireless device
wireless_devs = [
d for d in NetworkManager.NetworkManager.GetDevices()
if d.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI
]
assert len(wireless_devs) > 0, "No wifi device found. Aborting"
wireless_dev = wireless_devs[0]
# save the current active connection, to restore once this script exits
initial_connection = wireless_dev.ActiveConnection.Connection if wireless_dev.ActiveConnection else None
def restore_initial_connection():
if initial_connection:
NetworkManager.NetworkManager.ActivateConnection(
initial_connection, wireless_dev, "/")
atexit.register(restore_initial_connection)
# write the csv header
if not no_header:
print("timestamp,ssid,device_connected,ping_successful", flush=True)
# begin logging loop.
next_log_time = time.time()
while True:
# wait for the next logging iteration
restore_initial_connection(
) # leave initial connection active while waiting
time.sleep(max(next_log_time - time.time(), 0))
next_log_time += sample_interval * 60
for ssid in selected_ssids:
# activate the connection
if wireless_dev.State == NetworkManager.NM_DEVICE_STATE_ACTIVATED:
wireless_dev.Disconnect()
NetworkManager.NetworkManager.ActivateConnection(
connections[ssid], wireless_dev, "/")
connected = wait_for_connection(wireless_dev)
if connected:
# now test internet (by pinging google)
ping_successful = ping("www.google.com")
else:
ping_successful = False
# write out result
print(
f"{time.time()},{ssid},{int(connected)},{int(ping_successful)}",
flush=True)
return 0
|
f47e13bdb994e450b8bb77e26e0da3d25014032f
| 18,810 |
import numpy as np

def getNarrowBandULAMIMOChannel(azimuths_tx, azimuths_rx, p_gainsdB, number_Tx_antennas, number_Rx_antennas,
                                normalizedAntDistance=0.5, angleWithArrayNormal=0, pathPhases=None):
    """This function (a port of the original .m file) uses ULAs at both TX and RX.
- assumes one beam per antenna element
the first column will be the elevation angle, and the second column is the azimuth angle correspondingly.
p_gain will be a matrix size of (L, 1)
departure angle/arrival angle will be a matrix as size of (L, 2), where L is the number of paths
t1 will be a matrix of size (nt, nr), each
element of index (i,j) will be the received
power with the i-th precoder and the j-th
combiner in the departing and arrival codebooks
respectively
:param departure_angles: ((elevation angle, azimuth angle),) (L, 2) where L is the number of paths
:param arrival_angles: ((elevation angle, azimuth angle),) (L, 2) where L is the number of paths
:param p_gaindB: path gain (L, 1) in dB where L is the number of paths
:param number_Rx_antennas, number_Tx_antennas: number of antennas at Rx and Tx, respectively
:param pathPhases: in degrees, same dimension as p_gaindB
:return:
"""
azimuths_tx = np.deg2rad(azimuths_tx)
azimuths_rx = np.deg2rad(azimuths_rx)
# nt = number_Rx_antennas * number_Tx_antennas #np.power(antenna_number, 2)
m = np.shape(azimuths_tx)[0] # number of rays
H = np.matrix(np.zeros((number_Rx_antennas, number_Tx_antennas)))
gain_dB = p_gainsdB
path_gain = np.power(10, gain_dB / 10)
path_gain = np.sqrt(path_gain)
#generate uniformly distributed random phase in radians
if pathPhases is None:
pathPhases = 2*np.pi * np.random.rand(len(path_gain))
else:
#convert from degrees to radians
pathPhases = np.deg2rad(pathPhases)
#include phase information, converting gains in complex-values
path_complexGains = path_gain * np.exp(-1j * pathPhases)
# recall that in the narrowband case, the time-domain H is the same as the
# frequency-domain H
for i in range(m):
# at and ar are row vectors (using Python's matrix)
at = np.matrix(arrayFactorGivenAngleForULA(number_Tx_antennas, azimuths_tx[i], normalizedAntDistance,
angleWithArrayNormal))
ar = np.matrix(arrayFactorGivenAngleForULA(number_Rx_antennas, azimuths_rx[i], normalizedAntDistance,
angleWithArrayNormal))
H = H + path_complexGains[i] * ar.conj().T * at # outer product of ar Hermitian and at
#factor = (np.linalg.norm(path_complexGains) / np.sum(path_complexGains)) * np.sqrt(
# number_Rx_antennas * number_Tx_antennas) # scale channel matrix
#H *= factor # normalize for compatibility with Anum's Matlab code
return H
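# Hedged sketch of the helper assumed above (the real arrayFactorGivenAngleForULA
# is not shown in this snippet). A textbook ULA steering vector with normalized
# element spacing d is exp(-1j*2*pi*d*n*sin(theta)) for n = 0..N-1 when the angle
# is measured from the array normal; the 1/sqrt(N) normalization is a convention choice.
def arrayFactorGivenAngleForULA_sketch(numAntennaElements, theta,
                                       normalizedAntDistance=0.5,
                                       angleWithArrayNormal=0):
    n = np.arange(numAntennaElements)
    if angleWithArrayNormal == 0:
        phase = -2j * np.pi * normalizedAntDistance * n * np.sin(theta)
    else:
        phase = -2j * np.pi * normalizedAntDistance * n * np.cos(theta)
    return np.exp(phase) / np.sqrt(numAntennaElements)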
|
bb201abaca60e2855e86a41d9c581599b9ab0c22
| 18,811 |
def get_pybricks_reset_vector():
"""Gets the boot vector of the pybricks firmware."""
# Extract reset vector from dual boot firmware.
with open("_pybricks/firmware-dual-boot-base.bin", "rb") as pybricks_bin_file:
pybricks_bin_file.seek(4)
return pybricks_bin_file.read(4)
|
7d504e7e6e6ca444932fd61abb701a010a259254
| 18,812 |
from math import sin, cos, pi

def nSideCurve(sides=6, radius=1.0):
"""
nSideCurve( sides=6, radius=1.0 )
Create n-sided curve
Parameters:
sides - number of sides
(type=int)
radius - radius
(type=float)
Returns:
a list with lists of x,y,z coordinates for curve points, [[x,y,z],[x,y,z],...n]
(type=list)
"""
newpoints = []
step = 2.0 / sides
i = 0
while i < sides:
t = i * step
x = sin(t * pi) * radius
y = cos(t * pi) * radius
newpoints.append([x, y, 0])
i += 1
return newpoints
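# Illustrative usage (added example): the six corner points of a unit hexagon.
hexagon = nSideCurve(sides=6, radius=1.0)
for px, py, pz in hexagon:
    print(round(px, 3), round(py, 3), pz)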
|
d64668ae2fdbd2dc06b36fb2523e09a8cc380d6f
| 18,813 |
import numpy as np

def _get_corr_mat(corr_transform, n_dim):
""" Input check for the arguments passed to DirectionalSimulator"""
if corr_transform is None:
return np.eye(n_dim)
if not isinstance(corr_transform, np.ndarray) or corr_transform.ndim < 2:
err_msg = "corr_transform must be a 2-D numpy array"
raise ValueError(err_msg)
if corr_transform.shape[0] != n_dim:
err_msg = "Inconsistent number of marginal distributions and "
err_msg += "corr_transform shape"
raise ValueError(err_msg)
if corr_transform.shape[0] != corr_transform.shape[1]:
err_msg = "corr_transform must be square"
raise ValueError(err_msg)
if not (corr_transform == corr_transform.T).all():
err_msg = "corr_transform must be symmetrical"
raise ValueError(err_msg)
return corr_transform
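# Illustrative usage (added example): None falls back to the identity matrix,
# while a symmetric 2x2 correlation matrix passes the checks unchanged.
print(_get_corr_mat(None, n_dim=2))
print(_get_corr_mat(np.array([[1.0, 0.3], [0.3, 1.0]]), n_dim=2))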
|
94909cc43322e8eebf14942cd39817d10bd744fa
| 18,814 |
def get_flowline_routing(NHDPlus_paths=None, PlusFlow=None, mask=None,
mask_crs=None, nhdplus_crs=4269):
"""Read a collection of NHDPlus version 2 PlusFlow (routing)
tables from one or more drainage basins and consolidate into a
single pandas DataFrame, returning the `FROMCOMID` and `TOCOMID`
columns.
Parameters
----------
NHDPlus_paths : sequence
Sequence of paths to the top level folder for each drainage basin.
For example:
.. code-block:: python
['NHDPlus/NHDPlusGL/NHDPlus04',
'NHDPlus/NHDPlusMS/NHDPlus07']
by default None
PlusFlow : string or sequence
Single path to a PlusFlow table or sequence of PlusFlow table
filepaths, by default None
Returns
-------
flowline_routing : DataFrame
[description]
Raises
------
ValueError
[description]
"""
if NHDPlus_paths is not None:
flowlines_files, pfvaa_files, pf_files, elevslope_files = \
get_nhdplus_v2_filepaths(NHDPlus_paths, raise_not_exist_error=False)
pf = shp2df(pf_files)
if mask is not None:
if isinstance(mask, tuple):
extent_poly_nhd_crs = box(*mask)
filter = mask
elif mask is not None:
extent_poly_nhd_crs = read_polygon_feature(mask,
feature_crs=mask_crs,
dest_crs=nhdplus_crs)
# ensure that filter bbox is in same crs as flowlines
# get filters from shapefiles, shapley Polygons or GeoJSON polygons
filter = get_bbox(extent_poly_nhd_crs, dest_crs=nhdplus_crs)
else:
filter = None
flowlines = shp2df(flowlines_files, filter=filter)
keep_comids = pf['FROMCOMID'].isin(flowlines['COMID']) | \
pf['TOCOMID'].isin(flowlines['COMID'])
pf = pf.loc[keep_comids]
elif PlusFlow is not None:
pf = shp2df(PlusFlow)
else:
raise ValueError(("get_flowline_routing: Must provide one of more"
" NHDPlus_path or PlusFlow table."))
pf = pf.loc[pf['FROMCOMID'] != 0]
return pf[['FROMCOMID', 'TOCOMID']]
|
c79d943b35f236f9d2bddbc6c9e2f470ac6ba0fc
| 18,815 |
from typing import Callable
from typing import Optional
from datetime import datetime, timedelta
import pytz
def df_wxyz(
time_slot_sensor: Sensor, test_source_a: BeliefSource, test_source_b: BeliefSource
) -> Callable[[int, int, int, int, Optional[datetime]], BeliefsDataFrame]:
"""Convenient BeliefsDataFrame to run tests on.
For a single sensor, it contains w events, for each of which x beliefs by y sources each (max 2),
described by z probabilistic values (max 3).
Note that the event resolution of the sensor is 15 minutes.
"""
sources = [test_source_a, test_source_b] # expand to increase max y
cps = [0.1587, 0.5, 0.8413] # expand to increase max z
def f(w: int, x: int, y: int, z: int, start: Optional[datetime] = None):
if start is None:
start = datetime(2000, 1, 3, 9, tzinfo=pytz.utc)
# Build up a BeliefsDataFrame with various events, beliefs, sources and probabilistic accuracy (for a single sensor)
beliefs = [
TimedBelief(
source=sources[s],
sensor=time_slot_sensor,
value=1000 * e + 100 * b + 10 * s + p,
belief_time=datetime(2000, 1, 1, tzinfo=pytz.utc) + timedelta(hours=b),
event_start=start + timedelta(hours=e),
cumulative_probability=cps[p],
)
for e in range(w) # w events
for b in range(x) # x beliefs
for s in range(y) # y sources
for p in range(z) # z cumulative probabilities
]
return BeliefsDataFrame(sensor=time_slot_sensor, beliefs=beliefs)
return f
|
64928090a7fa58cc1f6a6e4928025c426c17e799
| 18,816 |
def not_posted(child, conn) -> bool:
"""Check if a post has been already tooted."""
child_data = child["data"]
child_id = child_data["id"]
last_posts = fetch_last_posts(conn)
return child_id not in last_posts
|
5be321bf838a22cfcd742c0ddf48eb00ec1e35bf
| 18,817 |
def parse_img_name(path):
    """Parse recipe, video, segment and frame ids from a frame image path.
    :param path: str, path to the frame image
    :return: tuple (rcp_id, vid_id, seg_id, frm_id)
    """
code = path.split('\\')[-1].split('.')[0]
vid_id = path.split('\\')[-2]
rcp_id = path.split('\\')[-3]
seg_id = int(code[:4])
frm_id = int(code[4:])
return rcp_id, vid_id, seg_id, frm_id
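# Illustrative usage (added example): a Windows-style frame path whose stem packs
# the segment id into the first four digits and the frame id into the rest.
rcp, vid, seg, frm = parse_img_name(r"recipe01\video02\00120005.jpg")
print(rcp, vid, seg, frm)  # recipe01 video02 12 5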
|
6e0a140934c584400365f12feb8a86cfea3bbb2b
| 18,818 |
def get_bspline_kernel(x, channels, transpose=False, dtype=tf.float32, order=4):
"""Creates a 5x5x5 b-spline kernel.
Args:
num_channels: The number of channels of the image to filter.
dtype: The type of an element in the kernel.
Returns:
A tensor of shape `[5, 5, 5, num_channels, num_channels]`.
"""
mesh = x.mesh
in_dim = x.shape[-1]
num_channels = channels.size
if order == 8:
kernel = np.array(( 1., 8., 28., 56., 70., 56., 28., 8., 1.), dtype=dtype.as_numpy_dtype())
elif order == 6:
kernel = np.array(( 1., 6., 15., 20., 15., 6., 1.), dtype=dtype.as_numpy_dtype())
elif order==2:
kernel = np.array(( 1., 2., 1.), dtype=dtype.as_numpy_dtype())
else:
kernel = np.array(( 1., 4., 6., 4., 1.), dtype=dtype.as_numpy_dtype())
size = len(kernel)
kernel = np.einsum('ij,k->ijk', np.outer(kernel, kernel), kernel)
kernel /= np.sum(kernel)
kernel = kernel[:, :, :, np.newaxis, np.newaxis]
kernel = tf.constant(kernel, dtype=dtype) * tf.eye(num_channels, dtype=dtype)
fd_dim = mtf.Dimension("fd", size)
fh_dim = mtf.Dimension("fh", size)
fw_dim = mtf.Dimension("fw", size)
if transpose:
return mtf.import_tf_tensor(mesh, kernel, shape=[fd_dim, fh_dim, fw_dim, channels, in_dim])
else:
return mtf.import_tf_tensor(mesh, kernel, shape=[fd_dim, fh_dim, fw_dim, in_dim, channels])
|
1696e3a9077c672becda474de98750f45d1fe3d4
| 18,819 |
from .protocols import Stock_solution, MonoDispensing_type1, MonoDispensing_type2, MultiBase, SMTransfer, ReactionQC, QCSolubilise, DMATransfer, \
    PostWorkupTransfer, Workup, PostWorkupQCAndTransfer, PostWorkupDMSOAddition, BaseT3PMulti, PoisedReactor

def gen_prot_dict():
    """
    Build a dictionary mapping each protocol's string name to its class.
    :return: dict of {str(protocol()): protocol class}
    """
    input_list = [Stock_solution, MonoDispensing_type1, MonoDispensing_type2, MultiBase, SMTransfer, ReactionQC, QCSolubilise, DMATransfer, PostWorkupTransfer,
                  Workup, PostWorkupQCAndTransfer, PostWorkupDMSOAddition, BaseT3PMulti, PoisedReactor]
out_dict = {}
for protocol in input_list:
out_dict[str(protocol())] = protocol
return out_dict
|
64c4c88f684297ea7e658015d225481005315527
| 18,820 |
import numpy as np

def f(x):
    """
    The target function that has to be predicted.
    """
return np.matmul(x * np.absolute(np.sin(x)), np.array([[2], [1]]))
|
228e8f431f7c071ad1587b76c73495296e1331f3
| 18,821 |
def create_frame_coords_list(coords_path):
"""
:param coords_path: [int]
:type coords_path: list
:return: int, [int]
:rtype: tuple
"""
id_number = coords_path[0]
fr_coordinates = [None]*int((len(coords_path) - 1) / 3) # excluding the index 0 (which is the id) the number of triples is the length of this array
index = 0
for i in range(1, len(coords_path), 3):
x = coords_path[i]
y = coords_path[i + 1]
frame_number = coords_path[i + 2]
fr_coordinates[index] = FrameCoord(x, y, frame_number)
index += 1
return id_number, fr_coordinates
|
ca835c04b67789903a74e6882434c570f33647ab
| 18,822 |
import types
import sys
def parse_args():
"""
Parses command-line arguments and returns a run configuration
"""
runconfig = types.SimpleNamespace()
runconfig.ssl = False
runconfig.port = None
runconfig.connection_string = None
i = 1
try:
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-s':
if runconfig.ssl:
raise ValueError
runconfig.ssl = True
runconfig.certificate = sys.argv[i + 1]
runconfig.key = sys.argv[i + 2]
runconfig.keypassword = sys.argv[i + 3]
i += 4
elif arg == '-p':
if runconfig.port is not None:
raise ValueError
runconfig.port = int(sys.argv[i + 1])
                if runconfig.port <= 0 or runconfig.port > 65535:
raise ValueError
i += 2
elif arg == '-c':
if runconfig.connection_string is not None:
raise ValueError
runconfig.connection_string = sys.argv[i + 1]
i += 2
else:
raise ValueError
if runconfig.connection_string is None:
raise ValueError
except (IndexError, ValueError):
print(USAGE)
sys.exit(1)
if runconfig.port is None:
runconfig.port = 1995
return runconfig
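# Illustrative invocation (added example): simulate the command line
# `prog -c "dbname=test" -p 8080` and parse it.
sys.argv = ["prog", "-c", "dbname=test", "-p", "8080"]
cfg = parse_args()
print(cfg.port, cfg.connection_string)  # 8080 dbname=test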
|
cf05d2b88c4bf9c81d107dba50d399be1cee5e7a
| 18,823 |
def arcToolReport(function=None, arcToolMessageBool=False, arcProgressorBool=False):
"""This decorator function is designed to be used as a wrapper with other GIS functions to enable basic try and except
reporting (if function fails it will report the name of the function that failed and its arguments. If a report
boolean is true the function will report inputs and outputs of a function.-David Wasserman"""
def arcToolReport_Decorator(function):
def funcWrapper(*args, **kwargs):
try:
funcResult = function(*args, **kwargs)
if arcToolMessageBool:
arcpy.AddMessage("Function:{0}".format(str(function.__name__)))
arcpy.AddMessage(" Input(s):{0}".format(str(args)))
arcpy.AddMessage(" Ouput(s):{0}".format(str(funcResult)))
if arcProgressorBool:
arcpy.SetProgressorLabel("Function:{0}".format(str(function.__name__)))
arcpy.SetProgressorLabel(" Input(s):{0}".format(str(args)))
arcpy.SetProgressorLabel(" Ouput(s):{0}".format(str(funcResult)))
return funcResult
except Exception as e:
arcpy.AddMessage(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__),
str(args)))
print(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__), str(args)))
print(e.args[0])
return funcWrapper
if not function: # User passed in a bool argument
def waiting_for_function(function):
return arcToolReport_Decorator(function)
return waiting_for_function
else:
return arcToolReport_Decorator(function)
|
673dd42bd96a0f5aede5ca0593efaa02d630e2e5
| 18,824 |
def check_for_pattern(input_string):
""" Check a string for a recurring pattern. If no pattern,
return False. If pattern present, return smallest integer
length of pattern.
    Warning: equal_division discards the remainder, so if the remainder doesn't
    fit the pattern, you will get a false positive.
The specific use is to check recurring decimal patterns, so it doesn't
matter for that use.
"""
if len(input_string) < 2:
return False
length_of_division = 1
limit = len(input_string)//2
while length_of_division < limit + 1:
divisions = equal_division(input_string, length_of_division)
divisions = set(divisions)
if len(divisions) == 1:
return length_of_division
else:
length_of_division += 1
return False
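# Hedged sketch of the helper assumed above (the real equal_division is not shown
# in this snippet): split the string into consecutive chunks of the given length,
# discarding any remainder, as the docstring warns.
def equal_division_sketch(s, n):
    return [s[i:i + n] for i in range(0, len(s) - len(s) % n, n)]

# With equal_division bound to such a helper, check_for_pattern("142857142857")
# would return 6 and check_for_pattern("123456") would return False.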
|
6d6e32c7228ef3cec4107a3354fe53b90ef69e04
| 18,825 |
import logging
from xml.dom.minidom import parse
from xml.parsers.expat import ExpatError

def get_xml_namespace(file_name, pkg_type):
"""Get xml's namespace.
Args:
file_name: The path of xml file.
Returns:
xml_namespace: The namespace of xml.
for example:
xml file content:
...
<config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces"
xmlns:ni="urn:ietf:params:xml:ns:yang:ietf-network-instance">
...
</interfaces>
</config>
xml_namespace: 'urn:ietf:params:xml:ns:yang:ietf-interfaces'
Raises:
Exception: Capture execution exception.
"""
feature_namespaces = []
try:
doc = parse(file_name)
root = doc.documentElement
if pkg_type in ['get','get-config']:
if root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "filter"):
child_nodes = root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "filter")[
0].childNodes
elif pkg_type == 'config':
if root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "config"):
child_nodes = root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "config")[
0].childNodes
else:
child_nodes = root.childNodes
logging.info("This is rpc-xml:" + file_name)
for child_node in child_nodes:
if child_node.nodeType == 1 and hasattr(child_node, 'namespaceURI'):
feature_namespaces.append(child_node.namespaceURI)
except ExpatError as expat_exception:
xml_structure_except(expat_exception, file_name)
except Exception as error_str:
error_write(error_str)
return feature_namespaces
|
401bf5c321b5626d7b7171e2270df04863a01d61
| 18,826 |
def build_successors_table(tokens):
"""Return a dictionary: keys are words; values are lists of
successors.
>>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.']
>>> table = build_successors_table(text)
>>> sorted(table)
[',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to']
>>> table['to']
['investigate', 'eat']
>>> table['pie']
['.']
>>> table['.']
['We']
"""
table = {}
prev = '.'
for word in tokens:
if prev not in table:
table[str(prev)] = [str(word)]
else: # if already in table then add this word to the list of successors
table[str(prev)] += [str(word)]
prev = word
return table
|
92206bf3dd40518c23c6fb98e22dc818912c5bcc
| 18,827 |
import math

from scipy.ndimage import minimum_filter1d

def _rolling_nanmin_1d(a, w=None):
"""
Compute the rolling min for 1-D while ignoring NaNs.
This essentially replaces:
`np.nanmin(rolling_window(T[..., start:stop], m), axis=T.ndim)`
Parameters
----------
a : numpy.ndarray
The input array
w : numpy.ndarray, default None
The rolling window size
Returns
-------
output : numpy.ndarray
Rolling window nanmin.
"""
if w is None:
w = a.shape[0]
half_window_size = int(math.ceil((w - 1) / 2))
return minimum_filter1d(a, size=w)[
half_window_size : half_window_size + a.shape[0] - w + 1
]
|
37229440ba632d1ddadc55a811f9abca1c8e3132
| 18,828 |
def get_model_init_fn(train_logdir,
tf_initial_checkpoint,
initialize_last_layer,
last_layers,
ignore_missing_vars=False):
"""Gets the function initializing model variables from a checkpoint.
Args:
train_logdir: Log directory for training.
tf_initial_checkpoint: TensorFlow checkpoint for initialization.
initialize_last_layer: Initialize last layer or not.
last_layers: Last layers of the model.
ignore_missing_vars: Ignore missing variables in the checkpoint.
Returns:
Initialization function.
"""
if tf_initial_checkpoint is None:
tf.logging.info('Not initializing the model from a checkpoint.')
return None
if tf.train.latest_checkpoint(train_logdir):
tf.logging.info('Ignoring initialization; other checkpoint exists')
return None
tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)
# Variables that will not be restored.
exclude_list = ['global_step']
if not initialize_last_layer:
exclude_list.extend(last_layers)
variables_to_restore = contrib_framework.get_variables_to_restore(exclude=exclude_list)
if variables_to_restore:
init_op, init_feed_dict = contrib_framework.assign_from_checkpoint(
tf_initial_checkpoint,
variables_to_restore,
ignore_missing_vars=ignore_missing_vars)
global_step = tf.train.get_or_create_global_step()
def restore_fn(sess):
sess.run(init_op, init_feed_dict)
sess.run([global_step])
return restore_fn
return None
|
7fdd1bcff59fc01dff2f1ef49eda4bd29b162ea2
| 18,829 |
import time
def tokenize_protein(text):
"""
    Tokenizes a protein string into a list of single-character strings
"""
aa = ['A','C','D','E','F','G','H','I','K','L',
'M','N','P','Q','R','S','T','V','W','Y']
N = len(text)
n = len(aa)
i=0
seq = list()
timeout = time.time()+5
for i in range(N):
symbol = text[i]
if (symbol in aa):
seq.append(symbol)
else:
seq.append('X')
if time.time() > timeout:
break
return seq
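# Illustrative usage (added example): residues outside the 20-letter alphabet
# are replaced by 'X'.
print(tokenize_protein("ACDZ"))  # ['A', 'C', 'D', 'X']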
|
7dba531023aef97dcbfb37af75a9a1459a1e94d2
| 18,830 |
from typing import Callable
def read_xml_string() -> Callable[[int, int, str], str]:
"""Read an XML file to a string. Subsection string needs to include a prepending '-'."""
def _read_xml_string(number: int, year: int, subsection: str) -> str:
xmlfile = f"tests/data/xmls/session-{number:03}-{year}{subsection}.xml"
with open(xmlfile, "r", encoding="utf-8") as infile:
lines = infile.readlines()
return " ".join([line.strip() for line in lines])
return _read_xml_string
|
2b4e4c3585e26138e5fecf820699e97e1011a842
| 18,831 |
import numpy as np
from PIL import Image

def compute_mean_std_data(filelist):
"""
Compute mean and standard deviation of a dataset.
:param filelist: list of str
:return: tuple of floats
"""
tensor_list = []
for file in filelist:
img = Image.open(file)
img_np = np.array(img).ravel()
tensor_list.append(img_np.ravel())
pixels = np.concatenate(tensor_list, axis=0)
return np.mean(pixels), np.std(pixels)
|
57c8d5e9294e291e9897ac0e865a661319123965
| 18,832 |
def ConstVal(val):
"""
Creates a LinComb representing a constant without creating a witness or instance variable
Should be used carefully. Using LinCombs instead of integers where not needed will hurt performance
"""
if not isinstance(val, int):
raise RuntimeError("Wrong type for ConstVal")
return LinComb(val, backend.one() * val)
|
d715564ea09224590be827d3e32043c4b66c5cfd
| 18,833 |
def filter_required_flat_tensor_spec(flat_tensor_spec):
"""Process a flat tensor spec structure and return only the required subset.
Args:
flat_tensor_spec: A flattened sequence (result of flatten_spec_structure)
with the joined string paths as OrderedDict. Since we use OrderedDicts we
can safely call flatten_spec_structure multiple times.
Raises:
ValueError: If the passed flat_tensor_spec is not a valid flat tensor_spec
structure.
Returns:
filtered_flat_required_tensor_spec: The same flattened sequence but only
the {key: tensor_spec} pairs for the non optional tensor_spec.
"""
if not is_flat_spec_or_tensors_structure(flat_tensor_spec):
raise ValueError('Only flat tensor_spec structures are allowed.')
filtered_flat_required_tensor_spec = TensorSpecStruct()
for key, value in flat_tensor_spec.items():
if hasattr(value, 'is_optional') and value.is_optional:
continue
filtered_flat_required_tensor_spec[key] = value
return filtered_flat_required_tensor_spec
|
aa55e790cd335030cf2c821dd006213db022b78a
| 18,834 |
def callback(photolog_id):
""" twitter로부터 callback url이 요청되었을때
최종인증을 한 후 트위터로 해당 사진과 커멘트를 전송한다.
"""
Log.info("callback oauth_token:" + request.args['oauth_token']);
Log.info("callback oauth_verifier:" + request.args['oauth_verifier']);
    # Fetch the request tokens received from Twitter during the OAuth step from the session.
OAUTH_TOKEN = session['OAUTH_TOKEN']
OAUTH_TOKEN_SECRET = session['OAUTH_TOKEN_SECRET']
oauth_verifier = request.args['oauth_verifier']
try:
        # Build a Twython object with the temporary request tokens and verify them.
twitter = Twython(current_app.config['TWIT_APP_KEY'],
current_app.config['TWIT_APP_SECRET'],
OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
final_step = twitter.get_authorized_tokens(oauth_verifier)
        # Re-create the Twython object with the final access tokens obtained via oauth_verifier.
twitter = Twython(current_app.config['TWIT_APP_KEY'],
current_app.config['TWIT_APP_SECRET'],
final_step['oauth_token'],
final_step['oauth_token_secret'])
session['TWITTER'] = twitter
        # Send the photo and comment identified by photolog_id to Twitter.
__send_twit(twitter, photolog_id)
except TwythonError as e:
Log.error("callback(): TwythonError , "+ str(e))
session['TWITTER_RESULT'] = str(e)
return redirect(url_for('.show_all'))
|
3dcca97278cf20f819fa357b85e971dae9a6dac8
| 18,835 |
def calc_adjusted_pvalues(adata, method='fdr_by'):
"""Calculates pvalues adjusted per sample with the given method.
:param data: AnnData object annotated with model fit results.
:param method: Name of pvalue adjustment method (from
statsmodels.stats.multitest.multipletests).
:return: AnnData object with adjusted pvalues.
"""
assert "X_pvalue" in adata.layers.keys(), (
'No X_pvalue found in AnnData object, calculate pvalues first.')
adata.layers["X_padj"] = (np.array([multiple_testing_nan(row,
method=method)
for row in adata.layers["X_pvalue"]]))
return adata
|
0097ceca4918ef4a4c4376c092b040752f408036
| 18,836 |
def create_model(model_type='mobilenet'):
"""
Create a model.
:param model_type: Must be one of 'alexnet', 'vgg16', 'resnet50' or 'mobilenet'.
:return: Model.
"""
    if model_type == 'alexnet':
        net = mdl.alexnet(input_shape, num_breeds, lr=0.001)
    elif model_type == 'vgg16':
        net = mdl.vgg16(input_shape, num_breeds, lr=0.0001)
    elif model_type == 'resnet50':
        net = mdl.resnet50(input_shape, num_breeds, lr=0.0002) # 0.01
    elif model_type == 'mobilenet':
        net = mdl.mobilenet(input_shape, num_breeds, lr=0.0001) # 0.01
else:
print("Model type is not supported.")
return net
|
44ab632eff28e40b5255094e2009b479e042b00b
| 18,837 |
from itertools import permutations

def generate_voter_groups():
"""Generate all possible voter groups."""
party_permutations = list(permutations(PARTIES, len(PARTIES)))
voter_groups = [VoterGroup(sequence) for sequence in party_permutations]
return voter_groups
|
16c55002600bf76178c529f1140fb28831d5065e
| 18,838 |
import logging

import numpy as np

def add_image_fuzzy_pepper_noise(im, ration=0.1, rand_seed=None):
    """ generate and add a continuous noise to an image
:param ndarray im: np.array<height, width> input float image
:param float ration: number means 0 = no noise
:param rand_seed: random initialization
:return ndarray: np.array<height, width> float image
>>> img = np.zeros((5, 9), dtype=int)
>>> img[1:4, 2:7] = 1
>>> img = add_image_fuzzy_pepper_noise(img, ration=0.5, rand_seed=0)
>>> np.round(img, 2)
array([[ 0.1 , 0.43, 0.21, 0.09, 0.15, 0.29, 0.12, 0. , 0. ],
[ 0.23, 0. , 0.94, 0.86, 1. , 1. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 1. , 1.08, 1. , 1. , 0.28, 0. ],
[ 0. , 0.04, 1.17, 1.47, 1. , 1.09, 0.86, 0. , 0.24],
[ 0.22, 0.23, 0. , 0.36, 0.28, 0.13, 0.4 , 0. , 0.33]])
"""
logging.debug('... add smooth noise to a probability image')
np.random.seed(rand_seed)
rnd = 2 * (np.random.random(im.shape) - 0.5)
rnd[abs(rnd) > ration] = 0
im_noise = np.abs(im - rnd)
# plt.subplot(1,3,1), plt.imshow(im)
# plt.subplot(1,3,2), plt.imshow(rnd)
# plt.subplot(1,3,3), plt.imshow(im - rnd)
# plt.show()
return im_noise
|
9bbf38b1d4fd16011dc884e98a25e8d872fef534
| 18,839 |
import random
def generator(fields, instance):
"""
Calculates the value needed for a unique ordered representation of the fields
we are paginating.
"""
values = []
for field in fields:
neg = field.startswith("-")
# If the field we have to paginate by is the pk, get the pk field name.
if field == 'pk':
field = instance._meta.pk.name
value = instance._meta.get_field(field.lstrip("-")).value_from_object(instance)
if hasattr(value, "isoformat"):
value = value.isoformat()
value = unicode(value)
if neg:
# this creates the alphabetical mirror of a string, e.g. ab => zy, but for the full
# range of unicode characters, e.g. first unicode char => last unicode char, etc
value = u"".join([ unichr(0xffff - ord(x)) for x in value ])
values.append(value)
values.append(unicode(instance.pk) if instance.pk else unicode(random.randint(0, 1000000000)))
return NULL_CHARACTER.join(values)
|
3d6f3837e109720ec78460dcd56b6cf1b3ddc947
| 18,840 |
from typing import Any
from typing import Union
def token_hash(token: Any, as_int: bool = True) -> Union[str, int]:
"""Hash of Token type
Args:
token (Token): Token to hash
as_int (bool, optional): Encode hash as int
Returns:
Union[str, int]: Token hash
"""
return _hash((token.text, token.start, token.end, token.id), as_int=as_int)
|
3adfc8dce2b37b86376d47f8299cb6813faab839
| 18,841 |
import six
import base64
from calendar import timegm
from datetime import datetime

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.twofactor.totp import TOTP

def generate_totp_passcode(secret):
"""Generate TOTP passcode.
:param bytes secret: A base32 encoded secret for TOTP authentication
:returns: totp passcode as bytes
"""
if isinstance(secret, six.text_type):
secret = secret.encode('utf-8')
while len(secret) % 8 != 0:
secret = secret + b'='
decoded = base64.b32decode(secret)
totp = TOTP(
decoded, 6, SHA1(), 30, backend=default_backend())
return totp.generate(timegm(datetime.utcnow().utctimetuple())).decode()
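# Illustrative usage (added example, with a throwaway base32 secret): the result
# is a six-digit, time-dependent string.
print(generate_totp_passcode("JBSWY3DPEHPK3PXP"))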
|
2f0392e86b5d84970ec43bbd4d647ca29345a373
| 18,842 |
def all_ndcubes(request):
"""
All the above ndcube fixtures in order.
"""
return request.getfixturevalue(request.param)
|
906412ebe9a26de5cfddcb1d1431ab014c8084c6
| 18,843 |
from pathlib import Path
import warnings
def read_xmu(fpath: Path, scan: str='mu', ref: bool=True, tol: float=1e-4) -> Group:
"""Reads a generic XAFS file in plain format.
Parameters
----------
fpath
Path to file.
scan
Requested mu(E). Accepted values are transmission ('mu'), fluorescence ('fluo'),
or None. The default is 'mu'.
ref
Indicates if the transmission reference ('mu_ref') should also be returned.
The default is True.
tol
Tolerance in energy units to remove duplicate values.
Returns
-------
:
Group containing the requested arrays.
Notes
-----
:func:`read_xmu` assumes the following column order in the file:
1. energy.
2. transmission/fluorescence mu(E).
3. transmission reference.
See also
--------
read_file : Reads a XAFS file based on specified columns.
Examples
--------
>>> from araucaria import Group
>>> from araucaria.io import read_xmu
>>> from araucaria.testdata import get_testpath
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> check_objattrs(group_mu, Group, attrlist=['mu', 'mu_ref'])
[True, True]
>>> # extracting only fluo scan
>>> group_fluo = read_xmu(fpath, scan='fluo', ref=False)
>>> check_objattrs(group_fluo, Group, attrlist=['fluo'])
[True]
>>> # extracting only mu_ref scan
>>> group_ref = read_xmu(fpath, scan=None, ref=True)
>>> check_objattrs(group_ref, Group, attrlist=['mu_ref'])
[True]
"""
# default modes and channels
scandict = ['mu', 'fluo', None]
coldict = {'fluo':1, 'mu':1, 'mu_ref':2}
# testing that scan exists in the current dictionary
if scan not in scandict:
warnings.warn("scan mode %s not recognized. Retrieving transmission measurement ('mu')." %scan)
scan = 'mu'
if scan is None:
usecols = (0, coldict['mu_ref'])
else:
usecols = (0, coldict[scan], coldict['mu_ref'])
group = read_file(fpath, usecols, scan, ref, tol)
return (group)
|
e5889fa309b7fb836cc5b7ea50f8987a647f00a2
| 18,844 |
def filter_order_by_oid(order, oid):
"""
:param order:
:type order: :class:`tests.testapp.testapp.trading.models.Order`
:param oid: Order ID
:type oid: int
"""
return order.tid == oid
|
bf84e2e9f2fa19dc19e1d42ceef92dd3050d1e89
| 18,845 |
from skaldship.passwords.utils import process_password_file, insert_or_update_acct
import logging
def process_pwdump_loot(loot_list=[], msf=None):
"""
Takes an array of loot records in loot_list, downloads the pwdump file and
adds the users.
"""
db = current.globalenv['db']
#cache = current.globalenv['cache']
data = []
for loot_id in loot_list:
loot = msf.loot_download(loot_id)
if loot['ltype'] not in ['host.windows.pwdump', 'windows.hashes']:
log("Loot is not a pwdump, it is a %s" % loot['ltype'], logging.ERROR)
continue
else:
# process the pwdump file
pw_data = loot['data'].split('\n')
accounts = process_password_file(
pw_data=pw_data,
file_type='PWDUMP',
source='Metasploit',
)
# find the info/0 service id for the host
host = get_host_record(loot['host'])
query = (db.t_services.f_number == '0') & (db.t_services.f_proto == 'info') & (db.t_services.f_hosts_id == host.id)
svc_id = db(query).select().first()
if svc_id is None:
# info/0 not found.. add it!
svc_id = db.t_services.insert(f_proto="info", f_number="0", f_status="info", f_hosts_id=host.id)
db.commit()
# insert or update the account records
resp_text = insert_or_update_acct(svc_id.id, accounts)
log("Added pwdump records for host: %s" % host.f_ipaddr)
data.append({loot['host']: resp_text})
return data
|
57448b24350dd66271906ba5fcdc0e4453d898e9
| 18,846 |
def has_poor_grammar(token_strings):
"""
Returns whether the output has an odd number of double quotes or if it does not have balanced
parentheses.
"""
has_open_left_parens = False
quote_count = 0
for token in token_strings:
if token == '(':
if has_open_left_parens:
return True
else:
has_open_left_parens = True
elif token == ')':
if has_open_left_parens:
has_open_left_parens = False
else:
return True
elif token == '"':
quote_count += 1
return quote_count % 2 == 1 or has_open_left_parens
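# Illustrative usage (added example): unbalanced parentheses or an odd number of
# double quotes count as poor grammar.
print(has_poor_grammar(['(', 'hello', ')']))  # False
print(has_poor_grammar(['"', 'hello']))       # True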
|
b35c6af0ec771ac22ff66d9ca875f5d916cb9489
| 18,847 |
def run(main, *, debug=False):
"""
    Since we're using an asyncio loop to run wait() in order to be compatible with async calls,
    we also run each wait() in a separate thread to allow nested calls to wait().
"""
thread = RunnerThread(main, debug=debug)
thread.start()
thread.join()
if thread.exception:
raise thread.exception
return thread.result
|
47c70c887e456ac69f5c767bf0f1e56c050f8f4b
| 18,848 |
import pandas as pd
def csv_dataset_reader(path):
"""
This function reads a csv from a specified path and returns a Pandas dataframe representation of it, and renames
columns.
:param path: Path to and name of the csv file to read.
:return: A Pandas dataframe.
"""
data = pd.read_csv(path, sep=",", header=None)
data.columns = ['age', 'weight', 'height']
return data
|
59a298c50bf060809ebbebc5d0ff3d9670e84244
| 18,849 |
def get_daily_blurb_info():
"""Get daily blurb info."""
html, ss_image_1day_file, ss_image_1year_file = _scrape()
return _parse(html, ss_image_1day_file, ss_image_1year_file)
|
ffe84accebda5780e55d34e58137288d02bc072d
| 18,850 |
import torch
def generate_random_ring_element(size, ring_size=(2 ** 64), **kwargs):
"""Helper function to generate a random number from a signed ring"""
# TODO (brianknott): Check whether this RNG contains the full range we want.
rand_element = torch.randint(
-(ring_size // 2), (ring_size - 1) // 2, size, dtype=torch.long, **kwargs
)
if rand_element.is_cuda:
return CUDALongTensor(rand_element)
return rand_element
|
6e7ea30e5b4ccbde7dc48d1fe8fa51468344c335
| 18,851 |
import cv2

def otsu_binarization(img):
"""
Method to perform Otsu Binarization
:param img: input image
:return: thresholded image
"""
ret2, th2 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return th2
|
99953288b893d56e17a9e9654393aa284eaae4b7
| 18,852 |
def rosstack_depends_1(s):
"""
@param s: stack name
@type s: str
@return: A list of the names of the stacks which s depends on directly
@rtype: list
"""
return rosstackexec(['depends1', s]).split()
|
e917c62c628498e1f100c045bf8e966ea3bfd355
| 18,853 |
def _config_file_is_to_update():
"""
Ask the user if the configuration file should be updated or not.
:return: Returns True if the user wants to update the configuration file and False otherwise.
:rtype: bool
"""
if yes_or_no_input("Do you want to save the account on the configuration file?") == USER_INPUT_YES:
return True
return False
|
e14be78e150e28b87a0e8f179cc86f4a240a60d3
| 18,854 |
import subprocess
def countbam(sortedbam, outdir):
"""calculates the raw counts from a BAM index
parameters
----------
sortedbam
string, the name of the sorted bam file
outdir
string, the path of the output directory
returns
----------
counts_file = file containing the counts
"""
counts_file = f"{sortedbam[:-3]}count"
try:
cmd_count = f"samtools idxstats {sortedbam} > {counts_file}"
res_count = subprocess.check_output(cmd_count, shell=True)
except(subprocess.CalledProcessError):
print('Unable to calculate raw counts from BAM')
return (counts_file)
|
de60c7af2a479d00487a1891a64c926f9a2e0ae0
| 18,855 |
import numpy as np
import pandas as pd
from pandas import DataFrame

def funcScrapeTableWunderground(html_tree, forecast_date_str):
    """
    Scrape the Wunderground hourly-forecast table from a parsed HTML tree
    into a pandas DataFrame, one row per forecast hour.
    """
# This will get you the Wunderground table headers for future hour conditions
columns = html_tree.xpath("//table[@id='hourly-forecast-table']/thead//button[@class='tablesaw-sortable-btn']")
rows = html_tree.xpath("//table[@id='hourly-forecast-table']/tbody/tr")
fill_cols = np.asarray([])
for column in columns:
# print etree.tostring(column)
col = column.xpath("text()")[0]
fill_cols = np.append(fill_cols, col)
# print(col)
# Make a DataFrame to fill
dayDf = DataFrame(columns = fill_cols)#.set_index(fill_cols[0])
# This will go through the rows of the table and grab actual values
for row in rows:
values = row.xpath("td")
for i, value in enumerate(values):
col = columns[i].xpath("text()")[0]
val = value.xpath("ng-saw-cell-parser/div//span/text()")
# print(val)
if col == 'Time':
timeVal = val
# Initializing a single row. The goal is to make it look just like what dayDf looks like
hourRow = pd.DataFrame([forecast_date_str + ' ' + (''.join(timeVal))],
columns = [col])#.set_index
elif col == 'Conditions':
hourRow[col] = val[1]
else:
if col == 'Pressure':
val = value.xpath("ng-saw-cell-parser//span/span/text()")
val = [val[0] + ' ' + val[2][0:2]]
if col in ['Precip', 'Amount']: # These are hiding behind hyperlinks. Need to be smart
val = value.xpath("ng-saw-cell-parser/div//span/a/text()")
try:
hourRow[col] = val[0]
except:
hourRow[col] = np.nan
dayDf = dayDf.append(hourRow)
dayDf['Time'] = pd.to_datetime(dayDf['Time'])
# print(columns[i].xpath("text()")[0])
# print value.xpath("ng-saw-cell-parser/div//span/text()")
return dayDf
|
aa6745565e8fa01df8b8f52f1314ee7bf1a434a8
| 18,856 |
from re import S
def as_finite_diff(derivative, points=1, x0=None, wrt=None):
"""
Returns an approximation of a derivative of a function in
the form of a finite difference formula. The expression is a
weighted sum of the function at a number of discrete values of
(one of) the independent variable(s).
Parameters
==========
derivative: a Derivative instance (needs to have an variables
and expr attribute).
points: sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
        centered around x0. default: 1 (step-size 1)
x0: number or Symbol, optional
the value of the independent variable (wrt) at which the
derivative is to be approximated. default: same as wrt
wrt: Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated for. If not provided it
is required that the Derivative is ordinary. default: None
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol, as_finite_diff
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> as_finite_diff(f(x).diff(x))
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and ``order + 1``
respectively. We can change the step size by passing a symbol
as a parameter:
>>> as_finite_diff(f(x).diff(x), h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a sequence:
>>> as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around x0, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/\
((-h + E*h)*(h + E*h)) + (-(-sqrt(2)*h + h)/(2*h) - \
(-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) + \
(-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> as_finite_diff(d2fdxdy, wrt=x)
-f(x - 1/2, y) + f(x + 1/2, y)
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.finite_diff_weights
"""
if wrt is None:
wrt = derivative.variables[0]
# we need Derivative to be univariate to guess wrt
if any(v != wrt for v in derivative.variables):
raise ValueError('if the function is not univariate' +
' then `wrt` must be given')
order = derivative.variables.count(wrt)
if x0 is None:
x0 = wrt
if not iterable(points):
# points is simply the step-size, let's make it a
# equidistant sequence centered around x0
if order % 2 == 0:
# even order => odd number of points, grid point included
points = [x0 + points*i for i
in range(-order//2, order//2 + 1)]
else:
# odd order => even number of points, half-way wrt grid point
points = [x0 + points*i/S(2) for i
in range(-order, order + 1, 2)]
if len(points) < order+1:
raise ValueError("Too few points for order %d" % order)
return apply_finite_diff(order, points, [
derivative.expr.subs({wrt: x}) for x in points], x0)
|
4b76eae0578434a9a087b08f01eefbcd3018bc01
| 18,857 |
from math import sqrt

def is_prime(pp: int) -> bool:
"""
Returns True if pp is prime
otherwise, returns False
Note: not a very sophisticated check
"""
if pp == 2 or pp == 3:
return True
elif pp < 2 or not pp % 2:
return False
odd_n = range(3, int(sqrt(pp) + 1), 2)
return not any(not pp % i for i in odd_n)
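# Illustrative usage (added example):
print([p for p in range(20) if is_prime(p)])  # [2, 3, 5, 7, 11, 13, 17, 19]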
|
f8661a7f625c198dd1d0b5b477aea22f50596a39
| 18,858 |
def createChromosome( totQty, menuData ):
"""
Creates the chromosome with Qty assigned to Each Dish such that
sum of all Qty equals to the number of dishes to be ordered
totQty = Number of Dishes to be Ordered
returns chromosome of dish id and corresponding quantity
"""
chromosome = []
qtySeq = randSeq2(len(menuData),totQty)
i=0
for key in menuData:
chromosome.append(Dish(key,qtySeq[i]))
i+=1
return chromosome
|
6dae9c5a610a50df67e18f2034513a090088e524
| 18,859 |
def add_residual(transformed_inputs, original_inputs, zero_pad=True):
"""Adds a skip branch to residual block to the output."""
original_shape = original_inputs.shape.as_list()
transformed_shape = transformed_inputs.shape.as_list()
delta = transformed_shape[3] - original_shape[3]
stride = int(np.ceil(original_shape[1] / transformed_shape[1]))
if stride > 1:
original_inputs = tf.layers.average_pooling2d(
original_inputs, pool_size=[stride] * 2, strides=stride, padding="same")
if delta != 0:
if zero_pad:
# Pad channels with zeros at the beginning and end.
if delta > 0:
original_inputs = tf.pad(
original_inputs, [[0, 0], [0, 0], [0, 0], [delta // 2, delta // 2]],
mode="CONSTANT",
constant_values=0)
else:
transformed_inputs = tf.pad(
transformed_inputs, [
[0, 0], [0, 0], [0, 0], [-delta // 2, -delta // 2]],
mode="CONSTANT",
constant_values=0)
else:
# Convolution
original_inputs = tf.layers.conv2d(
original_inputs,
filters=transformed_shape[3],
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
activation=None,
use_bias=False)
net = original_inputs + transformed_inputs
return net, original_inputs
|
e32897c6e80873b863fbc3358eaec8b6191086f0
| 18,860 |
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the fourth step of the FASTER algorithm.
This function attempts to automatically mark bad channels in each epochs by
performing outlier detection.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation', 'median_gradient'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
metrics = {
'amplitude': lambda x: np.ptp(x, axis=2),
'deviation': lambda x: _deviation(x),
'variance': lambda x: np.var(x, axis=2),
'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
[50, 60]),
}
if use_metrics is None:
use_metrics = metrics.keys()
info = pick_info(epochs.info, picks, copy=True)
data = epochs.get_data()[:, picks]
bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
m in metrics)
for ch_type, chs in _picks_by_type(info):
ch_names = [info['ch_names'][k] for k in chs]
chs = np.array(chs)
for metric in use_metrics:
logger.info('Bad channel-in-epoch detection on %s channels:'
% ch_type.upper())
s_epochs = metrics[metric](data[:, chs])
for i_epochs, epoch in enumerate(s_epochs):
outliers = find_outliers(epoch, thresh, max_iter)
if len(outliers) > 0:
bad_segment = [ch_names[k] for k in outliers]
logger.info('Epoch %d, Bad by %s:\n\t%s' % (
i_epochs, metric, bad_segment))
bads[metric][i_epochs, chs[outliers]] = True
return bads
|
6b4a0acc1eb4e1fc4f229cc237e071bf87047b5e
| 18,861 |
from collections import deque

def solution(A): # O(N^2)
"""
For a given value A, compute the number with the fewest number of
squared values and return them within an array.
eg. 26 can be computed with squared values [25, 1] or [16, 9, 1], but the
answer is only [25, 1] as we are looking for the fewest number of
squared values
>>> solution(26)
[25, 1]
>>> solution(128)
[64, 64]
>>> solution(33)
[25, 4, 4]
>>> solution(256)
[256]
"""
queue = deque() # O(1)
ready_queue(A, queue, []) # O(N)
return process_queue(queue) # O(N^2)
|
56f899d94cfc07a412a357a305553ad0ed8af092
| 18,862 |
import time
from os import path  # path.isfile is used below; sys.path has no isfile
from pathlib import Path

import numpy as np
from baseband import guppi

def gmrt_guppi_bb(rawfile, npol=2, header=None, chunk=None, samples_per_frame=4096, nchan=1):
"""
To read gmrt raw voltages file of GWB to convert to guppi raw
:USAGE:
--------
$ gmrt_raw_toguppi [-h] [-f FILENAME] [-c CHUNK] [-hdr HEADER] [-hf HEADER_FILE] [-hfo HEADER_FILE_OUTPUT]
To read gmrt raw voltages file of GWB to convert to guppi raw
optional arguments:
-h, --help show this help message and exit
-f FILENAME, --filename FILENAME
Input filename for conversion to guppiraw.
-c CHUNK, --chunk CHUNK
Input chunk size to read the desired chunk of byte.
-hdr HEADER, --header HEADER
Input header to inject to the raw file.
-hf HEADER_FILE, --header-file HEADER_FILE
Input header from path to inject to the raw file.
-hfo HEADER_FILE_OUTPUT, --header-file-output HEADER_FILE_OUTPUT
output header from path to inject to the raw file.
NOTE
-----
    imaginary data is not being read; per the baseband documentation (https://baseband.readthedocs.io/en/stable/api/baseband.guppi.open.html#baseband.guppi.open):
    For GUPPI, complex data is only allowed when nchan > 1.
"""
b=time.time()
if path.isfile(rawfile):
rawname=Path(rawfile).stem
if header is None:
header = {#'CHAN_BW':-100,
'TBIN':1, #provide sample rate in astropy.units * Hz
'TELESCOP':'GMRT',
'NPOL':npol,
'NCHAN':nchan,
'OBSERVER':'Vishal Gajjar',
'STT_IMJD':58132,
'STT_SMJD':51093,
'NBITS':8}
print(f'selected parameters: rawfile={rawfile}, npol={npol}, header={header}, chunk={chunk}, samples_per_frame={samples_per_frame}, nchan={nchan}')
print(f'copying file:{rawfile}')
if chunk is None:
npcm_data=np.memmap(rawfile, dtype='<i1', mode='r' )#,shape=(4096,))
else:
npcm_data=np.memmap(rawfile, dtype='<i1', mode='r', shape=(chunk,))
print(f'copied file :{time.time()-b}')
#npcm_data.flush()
#number_of_frames = totalsamples/samples_per_frame
#shape = (samples_per_frame,number_of_frames)
#npcm_data.flush()
real1_d =npcm_data # 0,2,4 indexed
im_d=np.zeros(np.shape(real1_d))
resd=np.array([real1_d,im_d], dtype='<i1').transpose()
guppifile=rawname+''
print(f'writing file stem: {guppifile}')
#fgh = guppi.open(guppifile+'_guppi.{file_nr:04d}.raw', 'ws', frames_per_file=1180013,
fgh = guppi.open(guppifile+'_guppi.0000.raw', 'ws',
samples_per_frame=samples_per_frame, nchan=nchan,
#npol=npol, #sample_rate=2.0E+08*u.Hz,
**header)
print(f'data shape: {np.shape(resd)}')
fgh.write(resd)
# -------------- when you have [p1r1,p1i1,p2r1,p2i1...]
# im_d = npcm_data[1::2] # even indexed
# ## pol1, pol2 = npcm_data[::2], npcm_data[1::2] # if no imaginary is in the bytes
# #pol1, pol2 = real_d[::2], real_d[1::2]
# ## pol1, pol2 = npcm_data[::2][::2], npcm_data[::2][1::2]
# pol1_real = real_d[::2]
# pol2_real = real_d[1::2]
# pol1_im=im_d[1::2]
# pol2_im=im_d[::2] # if you need imaginary and real
# pol1=pol1_real+pol1_im*1j
# pol2=pol2_real+pol2_im*1j
# #resd=np.array([pol1,pol2]).transpose()
# guppifile=rawname+''
# print(f'writing file stem: {guppifile}')
# #fgh = guppi.open(guppifile+'_guppi.{file_nr:04d}.raw', 'ws', frames_per_file=1180013,
# fgh = guppi.open(guppifile+'_guppi.0000.raw', 'ws',
# samples_per_frame=samples_per_frame, nchan=nchan,
# #npol=npol, #sample_rate=2.0E+08*u.Hz,
# **header)
# #fgh.write(resd)
# resd=np.array([[pol1,pol2],[pol1,pol2]] , dtype='complex64').transpose()
# print(f'data shape: {np.shape(resd)}')
# #fgh.write(np.array([npcm_data[::2][::2], npcm_data[::2][1::2]]).transpose())
# fgh.write(resd)
#fgh.write(np.array(npcm_data))
print(f'file writing completed: {time.time()-b}')
fgh.close()
return f'file created: {guppifile}'
else:
return f'file does not exist : {rawfile}'
|
cc135ec0dfeec0fe9f946ca1eac57bc979e024ea
| 18,863 |
def get_device_path():
"""Return device path."""
if is_gce():
return None
devices = get_devices()
device_serial = environment.get_value('ANDROID_SERIAL')
for device in devices:
if device_serial == device.serial:
return device.path
return None
|
5bd8bf47859c3721e47cfc45b49aaa06bed4159e
| 18,864 |
def pattern_maker(size, dynamic):
"""
Generate a pattern with pixel values drawn from the [0, 1] uniform
distribution
"""
def pattern():
return np.random.rand(size)
def static():
a_pattern = pattern()
def fn():
return a_pattern
return fn
return pattern if dynamic else static()
|
3fd256fe3f8c7669faec8a7d1757a334a51145ba
| 18,865 |
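A minimal usage sketch for pattern_maker above (assuming the function is in scope and only numpy is needed), showing that a dynamic maker draws a fresh pattern on every call while a static maker is frozen at creation time:
import numpy as np
np.random.seed(0)
dynamic_fn = pattern_maker(size=4, dynamic=True)
static_fn = pattern_maker(size=4, dynamic=False)
# Dynamic: two calls draw two independent patterns.
print(np.allclose(dynamic_fn(), dynamic_fn()))  # almost surely False
# Static: the pattern was sampled once and is reused on every call.
print(np.allclose(static_fn(), static_fn()))    # True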
def RMSE(a, b):
""" Return Root mean squared error """
return np.sqrt(np.square(np.subtract(a, b)).mean())
|
7d853535fb9e4072f983f05ad192cc38f2bbea8e
| 18,866 |
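A small worked example for RMSE above, assuming the function is in scope: the squared errors of [1, 2, 5] against [1, 2, 3] are [0, 0, 4], their mean is 4/3, so the result is sqrt(4/3) ≈ 1.155.
import numpy as np
print(RMSE(np.array([1, 2, 3]), np.array([1, 2, 5])))  # ~1.1547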
def alpha_a_b(coord, N, silent=True):
"""Calculate alpha, a, b for a rectangle with coordinates coord and
truncation at N."""
[x0, x1, y0, y1] = coord
a = 0
for zero in zeros[:N]:
a += exp(-zero*y0)/abs(complex(0.5, zero))
b = 0
for zero in zeros[N:]:
b += exp(-zero*y0)/abs(complex(0.5, zero))
def F_north(x):
return abs(F_N(complex(x, y1), N))
def F_south(x):
return abs(F_N(complex(x, y0), N))
def F_east(y):
return abs(F_N(complex(x1, y), N))
def F_west(y):
return abs(F_N(complex(x0, y), N))
# def x_bounds(f_new, x_new, f_old, x_old):
# return x0 <= x_new[0] <= x1
# def y_bounds(f_new, x_new, f_old, x_old):
# return y0 <= x_new[0] <= y1
ns_kwargs = {"bounds":[(x0, x1)]}
ew_kwargs = {"bounds":[(y0, y1)]}
min_north = basinhopping(F_north, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)
min_south = basinhopping(F_south, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)
min_east = basinhopping(F_east, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)
min_west = basinhopping(F_west, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)
# if not silent:
# print('min_north')
# print(min_north)
# print('min_south')
# print(min_south)
# print('min_east')
# print(min_east)
# print('min_west')
# print(min_west)
min_north = min_north.fun
min_south = min_south.fun
min_east = min_east.fun
min_west = min_west.fun
if not silent:
print((min_north, min_south, min_east, min_west))
alpha = min(min_north, min_south, min_east, min_west)
return alpha, a, b
|
41cc57c16a7526bf7a88503ea9315872062b8ac5
| 18,867 |
from typing import Any
from typing import Optional
def asdataset(
dataclass: Any,
reference: Optional[DataType] = None,
dataoptions: Any = None,
) -> Any:
"""Create a Dataset object from a dataclass object.
Args:
dataclass: Dataclass object that defines typed Dataset.
reference: DataArray or Dataset object as a reference of shape.
dataoptions: Options for Dataset creation.
Returns:
Dataset object created from the dataclass object.
"""
if dataoptions is None:
try:
dataoptions = dataclass.__dataoptions__
except AttributeError:
dataoptions = DataOptions(xr.Dataset)
model = DataModel.from_dataclass(dataclass)
dataset = dataoptions.factory()
for entry in model.data_vars:
dataset[entry.name] = entry(reference)
for entry in model.coords:
if entry.name in dataset.dims:
dataset.coords[entry.name] = entry(dataset)
for entry in model.coords:
if entry.name not in dataset.dims:
dataset.coords[entry.name] = entry(dataset)
for entry in model.attrs:
dataset.attrs[entry.name] = entry()
return dataset
|
4baf2df39f906f2b1981cb597cb6430e95bb1ca1
| 18,868 |
def get_edge_size(reader: ChkDirReader, chunks: list[ChunkRange], tilesize: int) -> int:
"""Gets the size of an edge tile from an unknown chunk"""
for chunk in chunks:
data: bytes = deflate_range(reader, chunk.start, chunk.end, True)
if data is None:
continue
try:
decompressed: bytes = lzo.decompress(data, False, MAX_BUFFER_LEN)
pixel_count: float = len(decompressed) / 4 # RGBA per-pixel
edge_length = pixel_count / tilesize # rect edge length
return int(edge_length)
except: # pylint: disable=bare-except
continue
return -1
|
54da5c4adafbcccae4cee9112e35470a97172b00
| 18,869 |
import time
import torch
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
|
8ec9bf7efcd126485a8066c7d5932b0c84c44b63
| 18,870 |
def toRegexp(exp,terminate=False,lower=False):
""" Case sensitive version of the previous one, for backwards compatibility """
return toCl(exp,terminate,wildcards=('*',),lower=lower)
|
d550164d7d2a628a0b0bcf37f5ee95de958fc2e5
| 18,871 |
def marker_used_in_game(marker_id: int) -> bool:
"""
Determine whether the marker ID is used in the game.
:param marker_id: An official marker number, mapped to the competitor range.
    :returns: True if the marker is used in the game.
"""
return any([marker_id in marker_range for marker_range in MARKER_SIZES])
|
437d5b8c3ff80683e3f19d5cb3786243c6e430b3
| 18,872 |
def iround(x):
"""
Round an array to the nearest integer.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {numpy.ndarray, scalar}
The rounded elements in `x`, with `int` dtype.
"""
return np.round(x).astype(int)
|
64837773f12eb096ede5d8963360ab28427b015d
| 18,873 |
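A short usage sketch for iround above, assuming the function is in scope; note that np.round uses round-half-to-even, so exact .5 values go to the nearest even integer.
import numpy as np
print(iround(np.array([0.4, 1.6, -2.3])))  # [ 0  2 -2]
print(iround(np.array([0.5, 1.5, 2.5])))   # [0 2 2] (banker's rounding)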
def multiLineManager(pentadList = [], debugMode = False):
    """ Takes the complete list of pentads and returns it with every multi-line
    statement merged onto a single line.
    """
output = []
currentStatement = ""
presentChar = 'a'
startingLine = 0
i = 0
state = "Nothing"
for i in range (len(pentadList)):
########### Variables #############
for j in range (len(pentadList[i].text)):
presentChar = str(pentadList[i].text[j])
if(debugMode) : print("MultiLM printing --> ", "j = ", j, " len = ", len(pentadList[i].text), "presentChar = ", presentChar)
##########################################
if(currentStatement == ""):
if(debugMode) : print("MultiLM printing --> ", "No char yet in the buffer")
startingLine = i
if(j == len(pentadList[i].text)-1):
if(debugMode) : print("MultiLM printing --> ", "\'\\n\' detected.")
state = "Store and Restart"
if(presentChar != '\n' and presentChar != '\\' ):
currentStatement += presentChar
if(debugMode) : print("MultiLM printing --> ", "char \'", presentChar, "\' added to", currentStatement)
if(presentChar == '\\'):
if(currentStatement != ""):
currentStatement += " "
if(debugMode) : print("MultiLM printing --> ", "char ", presentChar, "\' detected and replaced by space.") #to avoid the case "unsigned\int" becoming "unsignedint" for eg.
if(state == "Store and Restart"):
state = "Nothing"
newPentad = pentadStruct([pentadList[startingLine].lines[0], pentadList[i].lines[1]], currentStatement)
for roleOfPreviousLine in pentadList[i].roles:
newPentad.addRole(roleOfPreviousLine.type, roleOfPreviousLine.mainVar, roleOfPreviousLine.otherVars)
output.append(newPentad)
currentStatement = ""
if(currentStatement != ""):
output.append(pentadStruct([startingLine, i], currentStatement))
return spaceNormalizer(output, debugMode)
|
d1a5853ddbb94c94a2440b99148023ecd3f8abde
| 18,874 |
def get_ec2_conn():
"""
    Required: env.aws_region, env.aws_access_key, env.aws_secret_access_key
    Returns a connection to AWS EC2.
"""
conn = boto.ec2.connect_to_region(
env.aws_region,
aws_access_key_id=env.aws_access_key,
aws_secret_access_key=env.aws_secret_access_key
)
if conn is None:
print(red("Can't connect to ec2 region"))
return conn
|
5c2014f7d1a3ba465ec7f205ac34a5c1feeb2aac
| 18,875 |
from tqdm import tqdm
import sys
def eval_nominal_domain(pool: SamplerPool, env: SimEnv, policy: Policy, init_states: list) -> list:
"""
Evaluate a policy using the nominal (set in the given environment) domain parameters.
:param pool: parallel sampler
:param env: environment to evaluate in
:param policy: policy to evaluate
:param init_states: initial states of the environment which will be fixed if not set to None
:return: list of rollouts
"""
# Strip all domain randomization wrappers from the environment
env = remove_all_dr_wrappers(env, verbose=True)
pool.invoke_all(_setup_env_policy, env, policy)
# Run with progress bar
with tqdm(leave=False, file=sys.stdout, unit='rollouts', desc='Sampling') as pb:
return pool.run_map(_run_rollout_nom, init_states, pb)
|
0376438fc48b9442532edc8c57572a6fe87ccbc9
| 18,876 |
def create_final_comment_objects():
"""Goes through the final comments and returns an array
of objects."""
arr = [] # Stores objects
for line in final_file:
row = line.split(",")
# Set object variables for each object before adding it to the array
comment_number, comment_author, account_karma, comment_score, \
comment_num_replies, comment_permalink, comment_id, \
comment_length = [i.strip('\n') for i in row]
# Add the comment object to the array
arr.append(Final_Comment(comment_number, comment_author, account_karma, \
comment_score, comment_num_replies, \
comment_permalink, comment_id, comment_length))
return arr
|
02107ba5ebc23e5a8db1c30fa8709793e1fcbe7e
| 18,877 |
import re
def normalise_target_name(name, used=[], max_length=None):
"""
Check that name[:max_length] is not in used and
    append an integer suffix if it is.
"""
def generate_name(name, i, ml):
# Create suffix string
i_name = '' if i == 0 else '_' + str(i)
# Return concatenated string if ml is not set
if ml is None:
ml = len(name) + len(i_name)
t_name = name
else:
# Work out amount of name to drop
length = len(name) + len(i_name) - ml
t_name = name if length <= 0 else name[:-length]
# If the length of i_name is greater than ml
# just warn and revert to straight append
if len(i_name) >= ml:
log.warn('Too many repetitions of name %s.', name)
t_name = name
o_name = ''.join(filter(None, [t_name, i_name]))
return '{:{ml}.{ml}}'.format(o_name, ml=ml)
name = re.sub(r'[^-A-Za-z0-9_]', '_', name)
i = 0
test_name = generate_name(name, i, max_length)
while test_name in used:
i += 1
test_name = generate_name(name, i, max_length)
return test_name
|
bffc78525d766cbb941382b6f7dd9371cffee492
| 18,878 |
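A usage sketch for normalise_target_name above, assuming the function and its `log` object are in scope (the warning path is not exercised here); it shows suffixing on collision and truncation under max_length.
# No collision: returned unchanged.
print(normalise_target_name("sensor", used=[]))            # 'sensor'
# Collision: an integer suffix is appended.
print(normalise_target_name("sensor", used=["sensor"]))    # 'sensor_1'
# With max_length, the base name is shortened to make room for the suffix.
print(normalise_target_name("verylongname", used=["verylong"], max_length=8))  # 'verylo_1'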
def construct_pairwise_df(sr: pd.Series, np_fun):
"""Constructs an upper diagonal df from all pairwise comparisons of a sr"""
sr = sr.sort_index()
_mat = np.triu(np_fun(sr.to_numpy() - sr.to_numpy()[:, None]), k=1)
_mat[np.tril_indices(_mat.shape[0])] = None
return pd.DataFrame(_mat, index=sr.index.get_level_values('qid'),
columns=sr.index.get_level_values('qid')).rename_axis(index='qid_1', columns='qid_2')
|
bfef4a9c64e619e2d70efb3dea1fde9da5894634
| 18,879 |
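A usage sketch for construct_pairwise_df above, assuming the function is in scope; the series index level must be named 'qid', and np.abs is passed as np_fun so the result is an upper-triangular matrix of pairwise absolute differences (the lower triangle and diagonal are NaN).
import numpy as np
import pandas as pd
sr = pd.Series([1.0, 3.0, 6.0],
               index=pd.Index(["q1", "q2", "q3"], name="qid"))
print(construct_pairwise_df(sr, np.abs))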
def privacy(request):
"""This returns the privacy policy page"""
return render(request=request, template_name="registration/privacy.html")
|
c3467b0f670facb152c1f2cd793e6dd46301bc25
| 18,880 |
def seq_search(items, key):
"""顺序查找"""
for index, item in enumerate(items):
if item == key:
return index
return -1
|
1271555aea5f7291ebb3679a219d4b3eb81d87a7
| 18,881 |
import os
def _check_moog_files (fp,mode='r',clobber=True,max_filename_length=None):
""" Takes a moog keyword and extracts from the moogpars """
# - - - - - - - - - - - - filepath
if fp is None:
return
# - - - - - - - - - - - - check file mode
if mode not in ('r','w'):
raise ValueError("mode must be 'r' or 'w'")
# - - - - - - - - - - - - check the maximum filelength
if max_filename_length is None:
max_filename_length = opts['moog.moog_max_pathlength']
if len(fp) > max_filename_length:
warn("Filepath '{}' is too long for MOOG (max {}) omitting".format(fp,max_filename_length))
return
# - - - - - - - - - - - - check file
exists = os.path.isfile(fp)
if not exists and mode == 'r':
raise IOError("File does not exist '{}'".format(fp))
elif exists and mode == 'w' and not clobber:
raise IOError("File exist, not clobbering '{}'".format(fp))
return fp
|
20698622c1718a8765cd30f687f3550044adc358
| 18,882 |
def parse_prediction_key(key):
"""The "name" or "key" of a predictor is assumed to be like:
`ProHotspotCtsProvider(Weight=Classic(sb=400, tb=8), DistanceUnit=150)`
Parse this into a :class:`PredictionKey` instance, where
- `name` == "ProHotspotCtsProvider"
- `details` will be the dict: {"Weight" : "Classic(sb=400, tb=8)",
"DistanceUnit" : 150}
(Attempts to parse to ints or floats if possible).
"""
if "(" not in key:
return PredictionKey(key, {})
i = key.index("(")
name = key[:i].strip()
dets = key[i+1:-1]
dets = [x.strip() for x in _split_by_comma_not_in_brackets(dets)]
details = {}
for x in dets:
if "=" not in x:
key, value = x, None
else:
i = x.index("=")
key = x[:i].strip()
value = x[i+1:].strip()
try:
value = int(value)
except ValueError:
pass
if isinstance(value, str):
try:
value = float(value)
except ValueError:
pass
details[key] = value
return PredictionKey(name, details)
|
4d971da8097a237f6df8d96bb407c9706c6ed8f6
| 18,883 |
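parse_prediction_key above depends on a helper _split_by_comma_not_in_brackets that is not part of the snippet. A minimal sketch of one possible implementation (a simple bracket-depth scanner, not necessarily the original) is:
def _split_by_comma_not_in_brackets(text):
    """Split on commas that are not nested inside parentheses."""
    parts, current, depth = [], [], 0
    for ch in text:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
        if ch == "," and depth == 0:
            parts.append("".join(current))
            current = []
        else:
            current.append(ch)
    if current:
        parts.append("".join(current))
    return parts
With this helper, "Weight=Classic(sb=400, tb=8), DistanceUnit=150" splits into the two key=value chunks the docstring describes.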
def tick2dayfrac(tick, nbTicks):
"""Conversion tick -> day fraction."""
return tick / nbTicks
|
50d01778f62203d37e733a6b328455d3ea10e239
| 18,884 |
import os
def load_esol_semi_supervised(unlabeled_size=0.1, seed=2666):
"""
Parameters
----------
unlabeled_size :
(Default value = 0.1)
seed :
(Default value = 2666)
Returns
-------
"""
esol_labeled = pinot.data.esol() # Get labeled and unlabeled data
esol_unlabeled = utils.load_unlabeled_data(
os.path.dirname(utils.__file__) + "/esol_synthetic_smiles.txt",
unlabeled_size,
seed=seed,
)()
np.random.seed(seed)
esol_labeled.extend(esol_unlabeled)
np.random.shuffle(
esol_labeled
) # Combine and mix labeled and unlabeled data
return esol_labeled
|
e0263bb5acabc48a50bab215bd7933583a952d0d
| 18,885 |
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR
def get_business_day_of_month(year, month, count):
"""
For a given month get the Nth business day by count.
Count can also be negative, e.g. pass in -1 for "last"
"""
r = rrule(MONTHLY, byweekday=(MO, TU, WE, TH, FR),
              dtstart=datetime(year, month, 1),
bysetpos=count)
res = r[0]
    if res is None or res.month != month or res.year != year:
raise ValueError("No dates found in range. is there a flaw in your logic?")
return res.date()
|
f0322df24f63ee836cf4f98099ccc0e4eff20c67
| 18,886 |
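A usage sketch for get_business_day_of_month above, assuming python-dateutil is installed and the function is in scope; note that only weekends are excluded, not public holidays.
# First business day of January 2021 (Friday, 1 January).
print(get_business_day_of_month(2021, 1, 1))   # 2021-01-01
# Last business day of January 2021 (Friday, 29 January).
print(get_business_day_of_month(2021, 1, -1))  # 2021-01-29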
def inpolygon(wkt, longitude, latitude):
""" To determine whether the longitude and latitude coordinate is within the orbit
:param wkt(str): the orbit wkt info
:param longitude: to determine whether the longitude within the orbit
:param latitude: to determine whether the latitude within the orbit
:return: logical value whether the coordinate within the orbit and multipolygon
"""
multipolygon = shapely.wkt.loads(wkt)
point = shapely.geometry.Point(longitude, latitude)
return multipolygon.contains(point), multipolygon
|
b844361f2fb3002a1d6df2a0301d19cc5b75470d
| 18,887 |
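A usage sketch for inpolygon above, assuming shapely is installed and `import shapely.wkt` / `import shapely.geometry` have been done; the WKT here is a made-up 10x10 square footprint used only for illustration.
import shapely.wkt
import shapely.geometry
wkt = "MULTIPOLYGON (((0 0, 10 0, 10 10, 0 10, 0 0)))"
inside, footprint = inpolygon(wkt, 5.0, 5.0)
print(inside)                        # True
print(inpolygon(wkt, 20.0, 5.0)[0])  # False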
def matrixMultVec(matrix, vector):
"""
Multiplies a matrix with a vector and returns the result as a new vector.
:param matrix: Matrix
:param vector: vector
:return: vector
"""
new_vector = []
x = 0
for row in matrix:
for index, number in enumerate(row):
x += number * vector[index]
new_vector.append(x)
x = 0
return new_vector
|
8a03b3acfec0d91fcf0d2c85b4e2bdd4f3053dd2
| 18,888 |
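A worked example for matrixMultVec above, assuming the function is in scope: [[1, 2], [3, 4]] times [5, 6] gives [1*5 + 2*6, 3*5 + 4*6] = [17, 39].
print(matrixMultVec([[1, 2], [3, 4]], [5, 6]))  # [17, 39]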
def get_dev_value(weight, error):
"""
:param weight: shape [N, 1], the importance weight for N source samples in the validation set
:param error: shape [N, 1], the error value for each source sample in the validation set
(typically 0 for correct classification and 1 for wrong classification)
"""
N, d = weight.shape
_N, _d = error.shape
assert N == _N and d == _d, 'dimension mismatch!'
weighted_error = weight * error
cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1]
var_w = np.var(weight, ddof=1)
eta = - cov / var_w
return np.mean(weighted_error) + eta * np.mean(weight) - eta
|
740dbd755cf540b0133ddf321207ea0bbd74fc83
| 18,889 |
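A usage sketch for get_dev_value above, assuming the function is in scope; the weights and 0/1 errors are synthetic and only illustrate the required [N, 1] shapes.
import numpy as np
rng = np.random.default_rng(0)
N = 100
weight = rng.uniform(0.5, 1.5, size=(N, 1))            # importance weights
error = rng.integers(0, 2, size=(N, 1)).astype(float)  # 0/1 classification errors
print(get_dev_value(weight, error))                    # control-variate-corrected weighted risk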
def biLSTM(f_lstm, b_lstm, inputs, dropout_x=0.):
"""Feature extraction through BiLSTM
Parameters
----------
f_lstm : VariationalDropoutCell
Forward cell
b_lstm : VariationalDropoutCell
Backward cell
inputs : NDArray
seq_len x batch_size
dropout_x : float
Variational dropout on inputs
Returns
-------
outputs : NDArray
Outputs of BiLSTM layers, seq_len x 2 hidden_dims x batch_size
"""
for f, b in zip(f_lstm, b_lstm):
inputs = nd.Dropout(inputs, dropout_x, axes=[0]) # important for variational dropout
fo, _ = f.unroll(length=inputs.shape[0], inputs=inputs, layout='TNC', merge_outputs=True)
bo, _ = b.unroll(length=inputs.shape[0], inputs=inputs.flip(axis=0), layout='TNC',
merge_outputs=True)
f.reset()
b.reset()
inputs = nd.concat(fo, bo.flip(axis=0), dim=2)
return inputs
|
dc3cdc07a20e4ae5fbe257a81d92f15fb51333d9
| 18,890 |
import torch
def refer_expression(captions, n_ground=1, prefix="refer expressions:", sort=True):
"""
n_ground > 1
ground_indices
[1, 0, 2]
source_text
refer expressions: <extra_id_0> red crayon <extra_id_1> Yellow banana <extra_id_2> black cow
target_text
<vis_extra_id_1> <vis_extra_id_0> <vis_extra_id_2>
n_ground == 1
source_text
refer expressions: red crayon
target_text
<vis_extra_id_1>
"""
n_boxes = len(captions)
if sort:
ground_indices = torch.randperm(n_boxes)[:n_ground].sort().values
else:
ground_indices = torch.randperm(n_boxes)[:n_ground]
ground_indices = ground_indices.tolist()
source_text = [prefix]
target_text = []
if n_ground == 1:
idx = ground_indices[0]
source_text.append(f'{captions[idx]}')
target_text.append(f'<vis_extra_id_{idx}>')
else:
for j, idx in enumerate(ground_indices):
source_text.append(f'<extra_id_{j}>')
source_text.append(f'{captions[idx]}')
target_text.append(f'<vis_extra_id_{idx}>')
# target_text.append('</s>')
source_text = " ".join(source_text)
target_text = " ".join(target_text)
# return ground_indices, source_text, target_text
return source_text, target_text
|
57919ee416dbb981dbb7f03163beec779785cc2f
| 18,891 |
def url_to_filename(base, url):
"""Return the filename to which the page is frozen.
base -- path to the file
url -- web app endpoint of the page
"""
if url.endswith('/'):
url = url + 'index.html'
return base / url.lstrip('/')
|
35084e8b5978869bf317073c76bafc356a7d9046
| 18,892 |
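A usage sketch for url_to_filename above, assuming base is a pathlib.Path so the / operator joins paths.
from pathlib import Path
base = Path("build")
print(url_to_filename(base, "/about/"))        # build/about/index.html
print(url_to_filename(base, "/css/site.css"))  # build/css/site.css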
def _msd_anom_3d(time, D_alpha, alpha):
"""3d anomalous diffusion function."""
return 6.0*D_alpha*time**alpha
|
e5204c52368202665e4dd4acd7d86096349c0d29
| 18,893 |
import json
def make_json_response(status_code, json_object, extra_headers=None):
"""
Helper function to serialize a JSON object and add the JSON content type header.
"""
headers = {
"Content-Type": 'application/json'
}
if extra_headers is not None:
headers.update(extra_headers)
return status_code, json.dumps(json_object), headers
|
4857b806819e44b7a77e0a9a51df7b4fe6678656
| 18,894 |
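A usage sketch for make_json_response above, assuming the function is in scope; extra headers are merged on top of the JSON content type.
status, body, headers = make_json_response(
    200, {"ok": True}, extra_headers={"Cache-Control": "no-store"})
print(status)   # 200
print(body)     # {"ok": true}
print(headers)  # {'Content-Type': 'application/json', 'Cache-Control': 'no-store'}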
import os
def tmp_envfile(tmp_path, monkeypatch):
"""Create a temporary environment file."""
tmp_file_path = tmp_path / "setenv.txt"
monkeypatch.setenv("GITHUB_ENV", os.fspath(tmp_file_path))
return tmp_file_path
|
04deab16ce4b0e115e9fdc9b65a023f7c63f054f
| 18,895 |
from datetime import datetime
def calc_dst_temerin_li(time, btot, bx, by, bz, speed, speedx, density, version='2002n', linear_t_correction=False):
"""Calculates Dst from solar wind input according to Temerin and Li 2002 method.
Credits to Xinlin Li LASP Colorado and Mike Temerin.
Calls _jit_calc_dst_temerin_li. All constants are defined in there.
Note: vx has to be used with a positive sign throughout the calculation.
Parameters
==========
time : np.array
Array containing time variables.
btot : np.array
Array containing Btot.
bx : np.array
Array containing Bx in coordinate system ?.
by : np.array
Array containing By in coordinate system ?.
bz : np.array
Array containing Bz in coordinate system ?.
speed : np.array
Array containing solar wind speed.
speedx : np.array
Array containing solar wind speed in x-direction.
density : np.array
Array containing solar wind density.
    version : str (default='2002n')
String determining which model version should be used.
Returns
=======
dst_burton : np.array
Array with calculated Dst values over timesteps time.
"""
# Arrays
dst1=np.zeros(len(bz))
dst2=np.zeros(len(bz))
dst3=np.zeros(len(bz))
dst_tl=np.zeros(len(bz))
# Define initial values (needed for convergence, see Temerin and Li 2002 note)
dst1[0:10]=-15
dst2[0:10]=-13
dst3[0:10]=-2
if version == '2002':
newparams = False
else:
newparams = True
if version in ['2002', '2002n']:
# julian_days = [sunpy.time.julian_day(num2date(x)) for x in time]
julian_days = [astropy.time.Time(num2date(x), format='datetime', scale='utc').jd for x in time]
return _jit_calc_dst_temerin_li_2002(time, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3, dst_tl, julian_days, newparams=newparams)
elif version == '2006':
dst1[0:10], dst2[0:10], dst3[0:10] = -10, -5, -10
ds1995 = time - date2num(datetime(1995,1,1))
ds2000 = time - date2num(datetime(2000,1,1))
# YEARLY DRIFT CORRECTION TERM (NOT IN PAPER)
if linear_t_correction:
drift_corr = -0.014435865642103548 * ds2000 + 9.57670996872173
else:
drift_corr = 0.
return _jit_calc_dst_temerin_li_2006(ds1995, ds2000, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3) + drift_corr
|
f333217e34656c4566a254c1c383191f11e8c3d0
| 18,896 |
import os
def parsestrfile(str_inpath):
"""Returns dictionary containing :class:`~gemmi.Structure` objects and another one with the file names.
:param str_inpath: Either a directory or file path.
:type str_inpath: str
:raises KeyError: More than one structure file containing same identifier.
:return strdict: A dictionary containing imported :class:`~gemmi.Structure` objects.
:rtype strdict: dict [str, :class:`~gemmi.Structure`]
:return filedict: A dictionary containing file names.
:rtype filedict: dict [str, str]
"""
strdict={}
filedict={}
if os.path.isfile(str_inpath):
structure=gemmi.read_structure(str_inpath)
pdbid=structure.name.lower()
strdict[pdbid]=structure
filedict[pdbid]=os.path.basename(str_inpath)
elif os.path.isdir(str_inpath):
filelist=os.listdir(str_inpath)
        for file in filelist:
            fpath=os.path.join(str_inpath, file)
            if os.path.isfile(fpath):
                try:
                    structure=gemmi.read_structure(fpath)
                    pdbid=structure.name.lower()
                    if pdbid in strdict:
                        raise KeyError('Structure '+pdbid+' loaded more than once. Check files in directory and remove duplicates.')
                    strdict[pdbid]=structure
                    filedict[pdbid]=file
except:
pass
return strdict, filedict
|
e46d7242df1c2aab7e06f29db31d15e4085ecee0
| 18,897 |
import os
import json
def handle_import(labfile, labjs):
"""랩 파일이 참고하는 외부 랩 파일 가져오기.
Args:
labfile (Str): 랩파일 경로
labjs (dict): 랩 데이터
"""
if 'import' not in labjs:
return labjs
if '_imported_' not in labjs:
labjs['_imported_'] = []
adir = os.path.dirname(labfile)
for imp in labjs['import']:
path = os.path.join(adir, f'{imp}.lab.json')
if not os.path.isfile(path):
raise FileNotFoundError(path)
with open(path, 'rt', encoding='utf8') as f:
body = f.read()
data = json.loads(body)
if 'import' in data:
handle_import(labfile, data)
labjs['_imported_'].append(AttrDict(data))
|
3b6f833e5e4044c3fd7fdead6ed1678bc945234b
| 18,898 |
def reconstruct(vars_to_reconstruct, scheme, order_used):
"""
Reconstructs all variables using the requested scheme.
:param vars_to_reconstruct: The variables at the cell centers.
:type vars_to_reconstruct: list of list of double
:param Reconstruction.Scheme scheme: The reconstruction scheme to use.
:param order_used: Filled by the function and is used to return
the order of the reconstruction used.
:type order_used: list of int
:return: (`list of list of double`) The face reconstructed variables.
Each variable is of length `2 * number_of_cells`
"""
reconstructed_vars = [None] * len(vars_to_reconstruct)
for i in range(len(vars_to_reconstruct)):
extents = np.asarray([len(vars_to_reconstruct[i])])
reconstructed_vars[i] = _recons_dispatch[scheme](
vars_to_reconstruct[i], np.asarray(extents), 1, scheme, order_used)
return np.asarray(reconstructed_vars)
|
b1e3cd8b8ed91b6c7ccdd5d6903fbce3109a3871
| 18,899 |