content | sha1 | id |
---|---|---|
import logging
def update_softwaretitle_packages(api, jssid, pkgs):
"""
Update packages of software title
:param jssid: Patch Software Title ID
:param pkgs: dict of {version: package, ...}
:returns: None
"""
logger = logging.getLogger(__name__)
data = api.get(f"patchsoftwaretitles/id/{jssid}")
title = data['patch_software_title']
title_name = title['name']
logger.info(f"updating patch software title: {title_name} ({jssid})")
# single version (dict), multiple versions (list)
version = title['versions']['version']
_modified = False
try:
# access key of single version and count on TypeError being raised
v = version['software_version']
if v in pkgs.keys():
version['package'] = {'name': pkgs[v]}
_modified = True
except TypeError:
# looks like it was actually a list
for _version in version:
v = _version['software_version']
if v in pkgs.keys():
_version['package'] = {'name': pkgs[v]}
_modified = True
if _modified:
result = api.put(f"patchsoftwaretitles/id/{jssid}", data)
logger.info(f"succesfully updated: {title_name}")
return result
else:
logger.info(f"software title was not modified") | 0acb3dfbff0e85a2e8a876d5e5d484c4d1e52068 | 5,900 |
from typing import List
def get_balances(session: Session, redis: Redis, user_ids: List[int]):
"""Gets user balances.
Returns mapping { user_id: balance }
Enqueues in Redis user balances requiring refresh.
"""
# Find user balances
query: List[UserBalance] = (
(session.query(UserBalance)).filter(UserBalance.user_id.in_(user_ids)).all()
)
# Construct result dict from query result
result = {
user_balance.user_id: {
"owner_wallet_balance": user_balance.balance,
"associated_wallets_balance": user_balance.associated_wallets_balance,
"associated_sol_wallets_balance": user_balance.associated_sol_wallets_balance,
"waudio_balance": user_balance.waudio,
"total_balance": str(
int(user_balance.balance)
+ int(user_balance.associated_wallets_balance)
+ int(user_balance.associated_sol_wallets_balance)
* 10 ** WAUDIO_DECIMALS
+ int(user_balance.waudio) * 10 ** WAUDIO_DECIMALS
),
}
for user_balance in query
}
# Find user_ids that don't yet have a balance
user_ids_set = set(user_ids)
fetched_user_ids_set = {x.user_id for x in query}
needs_balance_set = user_ids_set - fetched_user_ids_set
# Add new balances to result set
no_balance_dict = {
user_id: {
"owner_wallet_balance": "0",
"associated_wallets_balance": "0",
"associated_sol_wallets_balance": "0",
"total_balance": "0",
"waudio_balance": "0",
}
for user_id in needs_balance_set
}
result.update(no_balance_dict)
# Get old balances that need refresh
needs_refresh = [
user_balance.user_id
for user_balance in query
if does_user_balance_need_refresh(user_balance)
]
# Enqueue new balances to Redis refresh queue
# 1. All users who need a new balance
# 2. All users who need a balance refresh
enqueue_lazy_balance_refresh(redis, list(needs_balance_set) + needs_refresh)
return result | 82f6fdf0fcc8bcd241c97ab50a89ba640793b704 | 5,901 |
from typing import Optional
from typing import Tuple
from numpy import ndarray
def kmeans(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
"""Partition observations into k clusters.
Parameters
----------
observations : ndarray, `shape (N, 2)` or `shape (N, 3)`
An array of observations (x, y) to be clustered.
Data should be provided as:
`[(x, y), (x, y), (x, y), ...]`
or
`[(x, y, z), (x, y, z), (x, y, z), ...]`
k : int, optional
Number of clusters to partition observations into, by default 5
Returns
-------
center : ndarray, `shape (k, 2)` or `shape (k, 3)`
An array of positions to center of each cluster.
count : ndarray, `shape (k, )`
Array of counts of datapoints closest to the center of its cluster.
Examples
-------
>>> observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
>>> center, count = kmeans_2d(observations, k=2)
>>> center
[[-4, -4], [5, -3]]
>>> count
[1, 4]
"""
if not isinstance(observations, ndarray):
raise TypeError("Observations must be a ndarray.")
if observations.shape[-1] == 3:
return kmeans_3d(observations, k)
elif observations.shape[-1] == 2:
return kmeans_2d(observations, k)
else:
raise ValueError("Observations must have shape (N, 2) or (N, 3).") | 1a8cb2e61e8d96a45d4165edf1b148fd7c8ab5e3 | 5,902 |
def encloses(coord, points):
""" """
sc = constants.CLIPPER_SCALE
coord = st(coord.to_list(), sc)
points = st(points, sc)
return pyclipper.PointInPolygon(coord, points) != 0 | d5d7aeb8f52087653027d57c7a718832dbf32200 | 5,903 |
import numpy as np
from scipy import sparse
def arpls(y, lam, ratio=1e-6, niter=1000, progressCallback=None):
"""
Return the baseline computed by asymmetric reweighted penalized least squares smoothing, arPLS.
Ref: Baseline correction using asymmetrically reweighted penalized least squares smoothing
Sung-June Baek, Aaron Park, Young-Jin Ahn and Jaebum Choo
Analyst, 2015, 140, 250-257. DOI: 10.1039/C4AN01061B
In this implementation, W is not squared so p carries the same meaning as in AsLS.
Parameters:
y: one spectrum to correct, or multiple as an array of shape (spectrum, wavenumber)
lam: lambda, the smoothness parameter
ratio: convergence criterion; target relative change in weights between iterations
niter: maximum number of iterations
progressCallback(int a, int b): callback function called to indicate that the processing
is complete to a fraction a/b.
Returns: baseline of the spectrum, measured at the same points
"""
L = y.shape[-1]
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
D = lam * D.dot(D.T)
def arpls_one(yy):
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
z = sparse.linalg.spsolve(W + D, w * yy)
d = yy - z
dn = d[d < 0]
s = dn.std()
wt = 1. / (1 + np.exp(2 / s * (d - (2*s-dn.mean()))))
if np.linalg.norm(w - wt) / np.linalg.norm(w) < ratio:
break
w = wt
return z
return mp_bgcorrection(arpls_one, y, progressCallback=progressCallback) | d149397827d89b8708a09f4ceb7c38c989d99e17 | 5,904 |
async def test_function_raised_exception(dut):
"""
Test that exceptions thrown by @function coroutines can be caught
"""
@cocotb.function
async def func():
raise ValueError()
@external
def ext():
return func()
with pytest.raises(ValueError):
await ext() | acd31a1142dea0cd300861e75721a4597e2b5bbc | 5,905 |
def dismiss_notification_mailbox(notification_mailbox_instance, username):
"""
Dismisses a Notification Mailbox entry.
It deletes the Mailbox entry for the given user.
Args:
notification_mailbox_instance (NotificationMailBox): notification_mailbox_instance
username (string)
Return:
bool: Notification Mailbox Dismissed
"""
profile_instance = get_self(username)
NotificationMailBox.objects.filter(target_profile=profile_instance, pk=notification_mailbox_instance.id).delete()
return True | 9955361ac42c079adefcd8402fb9a1d5e3822a57 | 5,906 |
import operator
import numpy as np
def knn(x, y, k, predict_x):
"""
k-NN implementation using Euclidean distance.
:param x: training samples
:param y: labels
:param k: number of neighbors
:param predict_x: sample to classify
:return: (label, count) for the most frequent label among the k nearest neighbors
"""
assert isinstance(y, np.ndarray)
y = y.flatten('F')
def cal_distance(a, b):
return np.sqrt(np.sum(np.power(a - b, 2), axis=0))
dists = {}
for (index, sample) in enumerate(x):
dists[index] = cal_distance(sample, predict_x)
k_sample = sorted(dists.items(), key=operator.itemgetter(1))[:k]
k_labels = y[[key for (key, value) in k_sample]]
counters = {}
for label in k_labels:
if label not in counters:
counters[label] = 1
else:
counters[label] += 1
return sorted(counters.items(), key=operator.itemgetter(1), reverse=True)[0] | 425095898acce2fc966d00d4ba6bc8716f1062f8 | 5,907 |
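A small self-contained sanity check for the classifier above (assuming knn is in scope); with the descending sort it returns the majority label among the k nearest neighbours.

import numpy as np

x = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [5.0, 5.0], [5.0, 6.0]])
y = np.array([0, 0, 0, 1, 1])

# The three nearest neighbours of (0, 0.5) all carry label 0,
# so the expected result is label 0 with a count of 3.
print(knn(x, y, 3, np.array([0.0, 0.5])))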
def piano():
"""A piano instrument."""
return lynames.Instrument('Piano', abbr='Pno.', transposition=None,
keyboard=True, midi='acoustic grand',
family='percussion', mutopianame='Piano') | 792a1dd3655ac038bdde27f9d1ad27451e2b9121 | 5,908 |
from typing import Any
from typing import Dict
from django.db.models import Field, ForeignKey
from django.db.models.fields.related import RelatedField
def extract_fields(obj: Any) -> Dict[str, Any]:
"""A recursive function that extracts all fields in a Django model, including related fields (e.g. many-to-many)
:param obj: A Django model
:return: A dictionary containing fields and associated values
"""
sub_content = {}
if obj is not None:
# Gets a list of any Django model fields
fields = type(obj)._meta.get_fields()
for field in fields:
if issubclass(field.__class__, ForeignKey):
sub_content[field.name] = extract_fields(getattr(obj, field.name))
elif issubclass(field.__class__, RelatedField):
sub_content[field.name] = [extract_fields(sub_obj) for sub_obj in list(getattr(obj, field.name).all())]
elif issubclass(field.__class__, Field):
sub_content[field.name] = getattr(obj, field.name)
return sub_content | bc6b45a82ab2a336e116ce528aaed45b2b77ef39 | 5,909 |
from re import T
def decomposeM(modified):
"""Auxiliary in provenance filtering: split an entry into name and date."""
splits = [m.rsplit(ON, 1) for m in modified]
return [(m[0], dtm(m[1].replace(BLANK, T))[1]) for m in splits] | 1f613d11d2f8c3ceec4f6c853b9412b5b7eb3e0c | 5,910 |
import logging
def update(data):
"""
TODO:
find a way to call collection.findOneAndUpdate(), currently pymodm .update()
only returns the number of updated record.
"""
try:
required_fields = ['id']
validator.validate_required_fields(required_fields, data)
cleaned_data = user_prevalidation(data)
updated_data = {key: val for key, val in cleaned_data.items()
if val is not None}
db_id = updated_data.pop('id')
_get_user({'id': db_id}) # call to validate that the user exists
user_entitymanager.update(db_id, updated_data)
user = _get_user({'id': db_id}) # call to get the updated data.
return flask_helper.ResponseHelper(user.to_dict(), http_status_code.OK)
except Exception as e:
logging.error(e)
raise | a3895574b5e811e91db2063bdabc3bd297d7a904 | 5,911 |
def get_2d_peaks_coords(
data: np.ndarray, size: int = None, threshold: float = 0.5
) -> np.ndarray:
"""Detect peaks in image data, return coordinates.
If neighborhoods size is None, default value is the highest value
between 50 pixels and the 1/40th of the smallest image dimension.
Detection threshold is relative to difference between data maximum and minimum.
"""
if size is None:
size = max(min(data.shape) // 40, 50)
data_max = spf.maximum_filter(data, size)
data_min = spf.minimum_filter(data, size)
data_diff = data_max - data_min
abs_threshold = (data_diff.max() - data_diff.min()) * threshold
diff = (data_max - data_min) > abs_threshold
maxima = data == data_max
maxima[diff == 0] = 0
labeled, _num_objects = spi.label(maxima)
slices = spi.find_objects(labeled)
coords = []
for dy, dx in slices:
x_center = int(0.5 * (dx.start + dx.stop - 1))
y_center = int(0.5 * (dy.start + dy.stop - 1))
coords.append((x_center, y_center))
if len(coords) > 1:
# Remove duplicates, if any
dist = distance_matrix(coords)
for index in reversed(np.unique(np.where((dist < size) & (dist > 0))[1])):
coords.pop(index)
return np.array(coords) | 815979bd0105acc7bb3fb58db691a8963d9ca2f4 | 5,912 |
def border_positions_from_texts(texts, direction, only_attr=None):
"""
From a list of textboxes in <texts>, get the border positions for the respective direction.
For vertical direction, return the text boxes' top and bottom border positions.
For horizontal direction, return the text boxes' left and right border positions.
<direction> must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL from pdftabextract.common.
optional <only_attr> must be either 'low' (only return 'top' or 'left' borders) or 'high' (only return 'bottom' or
'right').
Border positions are returned as sorted NumPy array.
"""
if direction not in (DIRECTION_HORIZONTAL, DIRECTION_VERTICAL):
raise ValueError("direction must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL (see pdftabextract.common)")
if only_attr is not None and only_attr not in ('low', 'high'):
raise ValueError("only_attr must be either 'low' or 'high' if not set to None (default)")
if direction == DIRECTION_VERTICAL:
attr_lo = 'top'
attr_hi = 'bottom'
else:
attr_lo = 'left'
attr_hi = 'right'
positions = []
for t in texts:
if only_attr is None or only_attr == 'low':
positions.append(t[attr_lo])
if only_attr is None or only_attr == 'high':
positions.append(t[attr_hi])
return np.array(sorted(positions)) | 8b0f57e21b015b6092104454195254861432b610 | 5,913 |
def progress(self):
"""Check if foo can send to corge"""
return True | 89a0c9671645f9fa855db35bf5e383145d6b7616 | 5,914 |
def write_sample_sdf(input_file_name, valid_list):
"""
Function for writing a temporary file with a subset of pre-selected
structures
:param input_file_name: name of input file
:param valid_list: list of indexes of pre-selected structures
:return: name of subsampled file
"""
sample_file_name = '{}_sample.sdf'.format(input_file_name.split('.')[0])
sample_file = open(sample_file_name, 'w')
mol = []
i = 0
for line in open(input_file_name):
mol.append(line)
if line[:4] == '$$$$':
i += 1
if i in valid_list:
for mol_line in mol:
sample_file.write(mol_line)
valid_list.remove(i)
mol = []
else:
mol = []
sample_file.close()
return sample_file_name | 0b22c14452f6de978e7ea811d761195d92bfe6c4 | 5,915 |
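A sketch of how the subsampler above can be exercised with a throw-away two-record SDF file; the record contents are placeholders (only the `$$$$` delimiters matter) and the function is assumed to be in scope.

import os
import tempfile

sdf_text = (
    "mol1\n  dummy header\n$$$$\n"
    "mol2\n  dummy header\n$$$$\n"
)
path = os.path.join(tempfile.mkdtemp(), "toy.sdf")
with open(path, "w") as fh:
    fh.write(sdf_text)

# Keep only the second record (indexes count the '$$$$' delimiters, starting at 1).
sample_path = write_sample_sdf(path, [2])
with open(sample_path) as fh:
    print(fh.read())   # prints the mol2 block only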
import math
import numpy as np
def rotx(theta, unit="rad"):
"""
ROTX gives rotation about X axis
:param theta: angle for rotation matrix
:param unit: unit of input passed. 'rad' or 'deg'
:return: rotation matrix
rotx(THETA) is an SO(3) rotation matrix (3x3) representing a rotation
of THETA radians about the x-axis
rotx(THETA, "deg") as above but THETA is in degrees
"""
check_args.unit_check(unit)
if unit == "deg":
theta = theta * math.pi / 180
ct = math.cos(theta)
st = math.sin(theta)
mat = np.matrix([[1, 0, 0], [0, ct, -st], [0, st, ct]])
mat = np.asmatrix(mat.round(15))
return mat | b05a6116c64837de163ad26dc36ffe1a7166635d | 5,916 |
from typing import Sequence
def _table(*rows: Sequence) -> str:
"""
>>> _table(['a', 1, 'c', 1.23])
'|a|1|c|1.23|'
>>> _table(['foo', 0, None])
'|foo|||'
>>> print(_table(['multiple', 'rows', 0], ['each', 'a', 'list']))
|multiple|rows||
|each|a|list|
"""
return '\n'.join([
'|'.join(['', *[str(cell or '') for cell in row], '']) for row in rows
]) | d566da2ad9240e73b60af00d3e4b4e25607234b4 | 5,917 |
def trunc(s, n):
"""
Truncate a string to N characters, appending '...' if truncated.
trunc('1234567890', 10) -> '1234567890'
trunc('12345678901', 10) -> '1234567890...'
"""
if not s:
return s
return s[:n] + '...' if len(s) > n else s | 0f3c9f03f566f9f50a557f6b5592ec20a12e92bc | 5,918 |
import os
def sgrib_variable_crop(tmp_grib, nthreads_w, fp_out, logger):
"""
Take the small grib file from grib_to_small_grib and cut it down
to the variables we need
Args:
tmp_grib: File path to small grib2 file
nthreads_w: Number of threads for running wgrib2 commands
fp_out: Path for outputting final grib2 file
logger: logger used to report wgrib2 failures
Returns:
"""
# call to grab correct variables
action2 = "wgrib2 {} -ncpu {} -match \
'TMP:2 m|UGRD:10 m|VGRD:10 m|TCDC:' -GRIB {}"
action2 = action2.format(tmp_grib,
nthreads_w,
fp_out)
fatl = call_wgrib2(action2, logger)
if fatl:
logger.warning(
'Cutting variables from grib did not work')
os.remove(tmp_grib)
return not fatl | 25c55b6e8a2af23e10c11aa5e7df2caa9cd79bdd | 5,919 |
def cs_geo():
"""Geographic lat/lon coordinates in WGS84 datum.
"""
cs = CSGeo()
cs.inventory.datumHoriz = "WGS84"
cs.inventory.datumVert = "mean sea level"
cs.inventory.spaceDim = 2
cs._configure()
cs.initialize()
return cs | 28df90e7b1490d681c9d13f4604dbc3966d896dc | 5,920 |
def make_range(value):
"""
Given an integer 'value',
return the value converted into a range.
"""
return range(value) | 385d23eaebd04249f9384e0d592b7fb3a9bbb457 | 5,921 |
def run(actor, observer, content):
"""
Shortcut to run an Onirim and return the result.
Returns:
True if win, False if lose, None if other exception thrown.
"""
return Flow(Core(actor, observer, content)).whole() | 03b1dee5bd993d8a88debd558878de5a32e9c318 | 5,922 |
def GetPoseBoneFCurveFromArmature(armatureObj, poseBoneName, data_path, parameterIndex):
"""
In Blender the FCurves are used to define the Key Frames.
In general, for a single object, there's one FCurve for each of
the following properties.
data_path, index
'location', 0 (.x)
'location', 1 (.y)
'location', 2 (.z)
'rotation_quaternion', 0 (.w)
'rotation_quaternion', 1 (.x)
'rotation_quaternion', 2 (.y)
'rotation_quaternion', 3 (.z)
'scale', 0 (.x)
'scale', 1 (.y)
'scale', 2 (.z)
For more tips about this, see: https://docs.blender.org/api/blender_python_api_2_75_release/info_quickstart.html#animation
Returns a bpy.types.FCurve
"""
completePath = BuildPoseBoneFCurveDataPath(poseBoneName, data_path)
return armatureObj.animation_data.action.fcurves.find(completePath, index=parameterIndex) | 450d98306adf43ea171dffa0fe6afa71ebabce57 | 5,923 |
def get_document_instance(conf=None):
"""
Helper function to get a database Document model instance based on CLA configuration.
:param conf: Same as get_database_models().
:type conf: dict
:return: A Document model instance based on configuration specified.
:rtype: cla.models.model_interfaces.Document
"""
return get_database_models(conf)['Document']() | 054f6ff6acc38ed44a9bd2a97e0598ed34b322f8 | 5,924 |
from typing import List
import gnupg
def get_full_private_keys(gpg: gnupg.GPG) -> List[GPGKey]:
"""Get a list of private keys with a full private part.
GPG supports exporting only the subkeys for a given key, and in this case
a stub of the primary private key is also exported (the stub). This stub
cannot be used to do anything with the primary key, so it's useful to list
only keys that can actually be used.
:param gpg: The GPG interface used by the gnupg library
:return: The list of fully available private keys in the keyring
"""
return [key for key in get_private_keys(gpg) if key.key_token == KeyToken.FULL] | d2bbb248613c3be9ed103212e0ca2a433de07e03 | 5,925 |
from flask import Blueprint
def create_blueprint():
"""Creates a Blueprint"""
blueprint = Blueprint('Health Check Blueprint', __name__)
blueprint.route('/')(healthcheck.healthcheck)
return blueprint | 348c6ff172bb0d230d83eab73dd451edba0d1b00 | 5,926 |
def playable_card(card, fireworks, n_colors):
# if isinstance(card, pyhanabi.HanabiCard):
# card = {'color':colors[card.color],'rank':card.rank}
"""A card is playable if it can be placed on the fireworks pile."""
if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
and card.rank != pyhanabi.HanabiCard.RankType.kUnknownRank):
for color in range(n_colors):
if fireworks[color] == card.rank:
continue
else:
return False
return True
# elif card['color'] == None or card['rank'] == None:
if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
and card.rank == pyhanabi.HanabiCard.RankType.kUnknownRank):
return False
else:
return card.rank == fireworks[card.color] | a96c6935c6b57ead9c639f13d8eccccbaf21aa4b | 5,927 |
def get_transformation_id(action):
""" Get the id of a transformation.
Parameters
----------
action: function
The transformation function
Returns
-------
int
The id of the action (-1 if not found)
"""
for index, trans in TRANSFORMATIONS.items():
if trans == action:
return index
return -1 | 2f08e7bb2b0418d39421e6b03e011d8ab4d68380 | 5,928 |
def getString(t):
"""If t is of type string, return it, otherwise raise InvalidTypeError.
"""
s = c_char_p()
if PL_get_chars(t, byref(s), REP_UTF8|CVT_STRING):
return s.value
else:
raise InvalidTypeError("string") | 1f128369f1ce3950ed352e43eea5db30f6da2d6e | 5,929 |
def prep_data(filename, in_len, pred_len):
"""load data from the file and chunk it into windows of input"""
# Columns are
# 0:datetime, 1:temperature, 2:humidity, 3:pressure, 4:wind_direction, 5:wind_speed
data = np.genfromtxt(filename, delimiter=',', skip_header=1,
usecols=(1, 2, 3, 4, 5), dtype=float)
# Remove rows that are missing values
data = data[~np.isnan(data).any(axis=1)]
# We will save the last 1/8th of the data for validation/testing data,
# 1/16 for validation, 1/16 for testing
total_len = data.shape[0]
val_len = total_len // 16
test_len = total_len // 16
train_len = total_len - val_len - test_len
train_data = data[:train_len]
val_data = data[train_len:train_len + val_len]
test_data = data[train_len + val_len:]
# To stay in the most accurate ranges of the ESN, and to put the various
# features on equal footing, we standardize the training data.
train_data, mu_arr, sigma_arr = standardize_traindata(train_data)
# We now need to scale our validation and test data by the means and standard
# deviations determined from the training data
val_data = scale_data(val_data, mu_arr, sigma_arr)
test_data = scale_data(test_data, mu_arr, sigma_arr)
# We need to convert the time series data to forecast form for one-step
# prediction training. For simplicity we will discard the remainder batches rU, rY
train_batch_size = 200
val_batch_size = in_len + pred_len + 1
test_batch_size = in_len + pred_len + 1
trainU, trainY, rU, rY = to_forecast_form(train_data, batch_size=train_batch_size)
valU, valY, rU, rY = to_forecast_form(val_data, batch_size=val_batch_size)
testU, testY, rU, rY = to_forecast_form(test_data, batch_size=test_batch_size)
return trainU, trainY, valU, valY, testU, testY, mu_arr, sigma_arr | 33e1348acdcf6025159b7ed81e18358d56838d3e | 5,930 |
import subprocess
import os
import glob
def should_build_ib():
"""
Helper function that detects the system's IB support and returns if we
should build with IB support.
"""
ib_util_found = False
ib_lib_found = False
ib_header_found = False
try:
# If the command doesn't exist, we can directly return instead of
# making a subprocess call
full_cmd_path = get_command_path(IB_DEVINFO_CMD)
if not full_cmd_path:
return False
subprocess.check_output([full_cmd_path, "--list"])
# Here we just would like to simply run the command to test if IB
# related tools / lib are installed without parsing the output. We
# will enable IB build as long as the command runs successfully.
#
# The output should look like either:
#
# > ibv_devinfo --list
# 0 HCAs founds:
#
# or
#
# > ibv_devinfo --list
# 4 HCAs found:
# mlx5_3
# mlx5_2
# mlx5_1
# mlx5_0
ib_util_found = True
except Exception:
# We just take all the exceptions here without affecting the build
ib_util_found = False
lib_paths = list(filter(bool, [
"/usr/lib/",
"/usr/lib/x86_64-linux-gnu/",
"/usr/lib/powerpc64le-linux-gnu/",
"/usr/lib/aarch64-linux-gnu/",
] + gather_paths([
"LIBRARY_PATH",
]) + gather_paths([
"LD_LIBRARY_PATH",
])))
include_paths = [
"/usr/include/",
]
if IS_CONDA:
lib_paths.append(os.path.join(CONDA_DIR, "lib"))
include_paths.append(os.path.join(CONDA_DIR, "include"))
for path in lib_paths:
if path is None or not os.path.exists(path):
continue
ib_libraries = sorted(glob.glob(os.path.join(path, "libibverbs*")))
if ib_libraries:
ib_lib_found = True
break
for path in include_paths:
if path is None or not os.path.exists(path):
continue
if os.path.exists(os.path.join(path, "infiniband/verbs.h")):
ib_header_found = True
break
return ib_util_found and ib_lib_found and ib_header_found | f5b26870f39b124690a7869e2c56997d51e6d499 | 5,931 |
def _get_security_group_id(connection, security_group_name):
"""
Takes a security group name and
returns the ID. If the name cannot be
found, the name will be attempted
as an ID. The first group found by
this name or ID will be used.
:param connection:
:param security_group_name:
:return:
"""
if not security_group_name:
print('The bees need a security group to run under. Need to open a port from where you are to the target '
'subnet.')
return
# Try by name
security_groups = connection.describe_security_groups(
Filters=[{'Name': 'group-name', 'Values': [security_group_name, ]}, ]
)
security_groups = security_groups['SecurityGroups']
if not security_groups:
# Try by id
security_groups = connection.describe_security_groups(
Filters=[{'Name': 'group-id', 'Values': [security_group_name, ]}, ]
)
security_groups = security_groups['SecurityGroups']
if not security_groups:
print('The bees need a security group to run under. The one specified was not found. '
'Create a sg that has access to port 22 ie. from 0.0.0.0/0')
return
return security_groups[0]['GroupId'] if security_groups else None | 70c9b8357a9634043f07ad0019ff3cc621ba859c | 5,932 |
import pandas as pd
def viz_preprocessing(df_path):
"""
Preprocess the aggregation csv into a good format for visualization
"""
df = pd.read_csv(df_path)
res = df.T
res = res.rename(columns=res.iloc[0]).drop(res.index[0])
res = res.astype("int64")
res.reset_index(inplace=True)
res["index"] = res["index"].apply(
lambda x: "{}-{}-{}".format(x[0:4], x[4:6], x[6:])
)
res["index"] = pd.to_datetime(res["index"])
return res | fc1c39d094934aa47ac26f6e5a70f071c1df4fbd | 5,933 |
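A minimal illustration of the reshaping above, assuming viz_preprocessing is in scope; pd.read_csv accepts a file-like object, so an in-memory CSV with hypothetical station names and YYYYMMDD date columns is enough.

import io

csv_text = "station,20200101,20200102\nA,1,2\nB,3,4\n"
res = viz_preprocessing(io.StringIO(csv_text))
print(res)
# Expected result: one row per date, one column per station, e.g.
#        index  A  B
# 0 2020-01-01  1  3
# 1 2020-01-02  2  4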
def build_encoded_broadcast_from_model(model_fn, encoder_fn):
"""Builds `StatefulBroadcastFn` for weights of model returned by `model_fn`.
This method creates a `SimpleEncoder` for every weight of model created by
`model_fn`, as returned by `encoder_fn`.
Args:
model_fn: A Python callable with no arguments that returns a
`tff.learning.Model`.
encoder_fn: A Python callable with a single argument, which is expected to
be a `tf.Tensor` of shape and dtype to be encoded. The function must
return a `tensor_encoding.core.SimpleEncoder`, which expects a `tf.Tensor`
with compatible type as the input to its `encode` method.
Returns:
A `StatefulBroadcastFn` for encoding and broadcasting the weights of model
created by `model_fn`.
Raises:
TypeError: If `model_fn` or `encoder_fn` are not callable objects.
"""
py_typecheck.check_callable(model_fn)
py_typecheck.check_callable(encoder_fn)
# TODO(b/144382142): Keras name uniquification is probably the main reason we
# still need this.
with tf.Graph().as_default():
values = model_utils.enhance(model_fn()).weights
encoders = tf.nest.map_structure(encoder_fn, values)
return tff.utils.build_encoded_broadcast(values, encoders) | 59b7290fe00b565467a66f72f6591f27448b9372 | 5,934 |
import networkx as nx
def adjacency(G, nodelist=None, weight="weight"):
"""
Returns the sparse adjacency matrix
representation of the graph.
"""
if nodelist is None:
nodelist = G.nodes()
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, format="csr")
return A | e17c0030a7d2c4e13659ca3585820e1c8da89101 | 5,935 |
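A short usage sketch, assuming adjacency is in scope and a NetworkX version that still provides nx.to_scipy_sparse_matrix (removed in NetworkX 3.0).

import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=2.0)
G.add_edge("b", "c", weight=0.5)

A = adjacency(G)   # scipy CSR matrix in node insertion order: a, b, c
print(A.toarray())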
import datetime
def sample_movie(user, **params):
"""Create and return a movie"""
defaults = {
'title': 'A Walk to Remember',
'duration': datetime.timedelta(hours=2, minutes=15),
'price': 8.99
}
defaults.update(params)
return Movie.objects.create(user=user, **defaults) | d07716fbe4b043022592ae2465bb02d02f45fe41 | 5,936 |
import difflib
def lines_diff(lines1, lines2):
"""Show difference between lines."""
is_diff = False
diffs = list()
for line in difflib.ndiff(lines1, lines2):
if not is_diff and line[0] in ('+', '-'):
is_diff = True
diffs.append(line)
return is_diff, diffs | 50916d46871980fadfd854dc698481a4b0f35834 | 5,937 |
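A tiny self-contained check of the helper above (assuming lines_diff is in scope):

changed, diff = lines_diff(["a", "b", "c"], ["a", "x", "c"])
print(changed)   # True
print(diff)      # ['  a', '- b', '+ x', '  c']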
import re
def parse_ipmi_hpm(output):
"""Parse the output of the hpm info retrieved with ipmitool"""
hrdw = []
line_pattern = re.compile(r'^\|[^0-9]*([0-9]+)\|[^a-zA-Z ]* ?([^\|]*)\|([^\|]*)\|([^\|]*)\|([^\|]*)\|')
for line in output:
match = line_pattern.match(line)
if match:
name = match.group(2).strip()
version = match.group(3).strip().split(" ")[0]
hrdw.append(('firmware', name, 'version', version))
return hrdw | 001731ce46fa6bbdb5103727265a0bdd353773be | 5,938 |
def get_genes_and_pathways(reactions, r_numbers, species):
"""Returns a CSV-formatted string with the list of genes and pathways where
the reaction(s) of 'species' appear.
:param reactions: list of reactions for species
:param r_numbers: RNumbers object
:param species: KEGG organism code
:return: CSV-formatted string with genes and pathways where reactions of
species are present
"""
gene_set = set()
pathway_set = set()
for reaction in reactions:
organism = r_numbers.find(reaction).find(species)
assert organism is not None
for gene in organism.genes:
gene_set.add(gene.replace(species + ':', ''))
for pathway in organism.pathways:
pathway_set.add(pathway)
gene_col = ' '.join(sorted(gene_set))
pathway_col = ' '.join(sorted(pathway_set))
return gene_col.rstrip() + ';' + pathway_col.rstrip() + ';' | 0ecddcaf50650b04125be73bcf6b304a77df011d | 5,939 |
import os
def datasetFiles(request):
"""
Return a list all dataset files in the datasets directory, by looking for files ending
with .h5 suffix. eg. ['/Users/jarnyc/BioPyramid/data/datasets/lanner.1.0.h5']
"""
# This is the dataset directory, set by the config file
datadir = request.registry.settings['biopyramid.model.datadir']
# Go through each file in the directory and fetch files with .h5 suffix
filepaths = []
for filename in os.listdir(datadir):
if filename.endswith(".h5"):
filepaths.append(os.path.join(datadir, filename))
return filepaths | 0c4e2ffff720ec24b6f673f059baa023458f72e9 | 5,940 |
def relate_ca(assessment, template):
"""Generates custom attribute list and relates it to Assessment objects
Args:
assessment (model instance): Assessment model
template: Assessment Template instance (may be None)
"""
if not template:
return None
ca_definitions = all_models.CustomAttributeDefinition.query.options(
orm.undefer_group('CustomAttributeDefinition_complete'),
).filter_by(
definition_id=template.id,
definition_type="assessment_template",
).order_by(
all_models.CustomAttributeDefinition.id
)
created_cads = []
for definition in ca_definitions:
cad = all_models.CustomAttributeDefinition(
title=definition.title,
definition=assessment,
attribute_type=definition.attribute_type,
multi_choice_options=definition.multi_choice_options,
multi_choice_mandatory=definition.multi_choice_mandatory,
mandatory=definition.mandatory,
helptext=definition.helptext,
placeholder=definition.placeholder,
)
db.session.add(cad)
created_cads.append(cad)
return created_cads | 31744ac40f385746e6d4e13a97ed461312280d99 | 5,941 |
def getSenderNumberMgtURL(request):
"""
๋ฐ์ ๋ฒํธ ๊ด๋ฆฌ ํ์
URL์ ๋ฐํํฉ๋๋ค.
- ๋ณด์์ ์ฑ
์ ๋ฐ๋ผ ๋ฐํ๋ URL์ 30์ด์ ์ ํจ์๊ฐ์ ๊ฐ์ต๋๋ค.
- https://docs.popbill.com/fax/python/api#GetSenderNumberMgtURL
"""
try:
# ํ๋นํ์ ์ฌ์
์๋ฒํธ
CorpNum = settings.testCorpNum
# ํ๋นํ์ ์์ด๋
UserID = settings.testUserID
url = faxService.getSenderNumberMgtURL(CorpNum, UserID)
return render(request, 'url.html', {'url': url})
except PopbillException as PE:
return render(request, 'exception.html', {'code': PE.code, 'message': PE.message}) | 371ca0a813c54061c68af34719ca132081f0bfda | 5,942 |
def closest_match(match, specs, depth=0):
"""
Recursively iterates over type, group, label and overlay key,
finding the closest matching spec.
"""
new_specs = []
match_lengths = []
for i, spec in specs:
if spec[0] == match[0]:
new_specs.append((i, spec[1:]))
else:
if all(isinstance(s[0], basestring) for s in [spec, match]):
match_length = max(i for i in range(len(match[0]))
if match[0].startswith(spec[0][:i]))
elif is_number(match[0]) and is_number(spec[0]):
m = bool(match[0]) if isinstance(match[0], np.bool_) else match[0]
s = bool(spec[0]) if isinstance(spec[0], np.bool_) else spec[0]
match_length = -abs(m-s)
else:
match_length = 0
match_lengths.append((i, match_length, spec[0]))
if len(new_specs) == 1:
return new_specs[0][0]
elif new_specs:
depth = depth+1
return closest_match(match[1:], new_specs, depth)
else:
if depth == 0 or not match_lengths:
return None
else:
return sorted(match_lengths, key=lambda x: -x[1])[0][0] | 3a212d880004fad843fe2d254ac96315bd1d12cf | 5,943 |
def average(w, axis=-1):
"""Calculate average
Example:
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]))
>>> average(w1)
Waveform(array([0, 1]), array([ 2. , 2.5]))
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]), \
xlabels=['row','col'])
>>> average(w1, axis='row')
Waveform(array([0, 1]), array([ 0.5, 4. ]))
"""
return reducedim(w, np.mean(w._y, axis=w.getaxis(axis)),
axis=w.getaxis(axis)) | bd5510e78c995e0a9f656144393b0496e071cdf5 | 5,944 |
def random():
"""Return a random parameter set for the model."""
total_thickness = 10**np.random.uniform(2, 4.7)
Nlayers = np.random.randint(2, 200)
d_spacing = total_thickness / Nlayers
thickness = d_spacing * np.random.uniform(0, 1)
length_head = thickness * np.random.uniform(0, 1)
length_tail = thickness - length_head
Caille_parameter = np.random.uniform(0, 0.8)
pars = dict(
length_head=length_head,
length_tail=length_tail,
Nlayers=Nlayers,
d_spacing=d_spacing,
Caille_parameter=Caille_parameter,
)
return pars | 958410bb8a696652b5a58cb15168719c2391179d | 5,945 |
def extract_features_to_dict(image_dir, list_file):
"""extract features and save them with dictionary"""
label, img_list = load_image_list(image_dir, list_file)
ftr = feature
integer_label = label_list_to_int(label)
feature_dict = {'features': ftr,
'label': integer_label,
'label_original': string_list_to_cells(label),
'image_path': string_list_to_cells(img_list)}
return feature_dict | 2fe641d7bcc24f293fae0c8badf274c9f32051d4 | 5,946 |
import torch
from typing import List
from typing import Dict
def roi_heads_forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
targets (List[Dict])
"""
if targets is not None:
for t in targets:
# TODO: https://github.com/pytorch/pytorch/issues/26731
floating_point_types = (torch.float, torch.double, torch.half)
assert t["boxes"].dtype in floating_point_types, "target boxes must of float type"
assert t["labels"].dtype == torch.int64, "target labels must of int64 type"
if self.has_keypoint():
assert t["keypoints"].dtype == torch.float32, "target keypoints must of float type"
if self.training:
proposals, matched_idxs, labels, regression_targets, length_labels = self.select_training_samples(proposals, targets)
else:
labels = None
regression_targets = None
matched_idxs = None
box_features = self.box_roi_pool(features, proposals, image_shapes)
box_features = self.box_head(box_features)
class_logits, box_regression, length_logits = self.box_predictor(box_features)
result: List[Dict[str, torch.Tensor]] = []
losses = {}
if self.training:
assert labels is not None and regression_targets is not None and length_labels is not None
loss_classifier, loss_box_reg, loss_length = fastrcnn_loss(class_logits, box_regression, labels, regression_targets, length_labels, length_logits)
losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg, "loss_vessel_length": loss_length}
else:
boxes, scores, labels, lengths = postprocess_detections(class_logits, box_regression, proposals, image_shapes, length_logits)
num_images = len(boxes)
for i in range(num_images):
result.append(
{
"boxes": boxes[i],
"labels": labels[i],
"scores": scores[i],
"lengths": lengths[i]
}
)
if self.has_mask():
mask_proposals = [p["boxes"] for p in result]
if self.training:
assert matched_idxs is not None
# during training, only focus on positive boxes
num_images = len(proposals)
mask_proposals = []
pos_matched_idxs = []
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
mask_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
if self.mask_roi_pool is not None:
mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes)
mask_features = self.mask_head(mask_features)
mask_logits = self.mask_predictor(mask_features)
else:
raise Exception("Expected mask_roi_pool to be not None")
loss_mask = {}
if self.training:
assert targets is not None
assert pos_matched_idxs is not None
assert mask_logits is not None
gt_masks = [t["masks"] for t in targets]
gt_labels = [t["labels"] for t in targets]
rcnn_loss_mask = maskrcnn_loss(mask_logits, mask_proposals, gt_masks, gt_labels, pos_matched_idxs)
loss_mask = {"loss_mask": rcnn_loss_mask}
else:
labels = [r["labels"] for r in result]
masks_probs = maskrcnn_inference(mask_logits, labels)
for mask_prob, r in zip(masks_probs, result):
r["masks"] = mask_prob
losses.update(loss_mask)
# keep none checks in if conditional so torchscript will conditionally
# compile each branch
if (
self.keypoint_roi_pool is not None
and self.keypoint_head is not None
and self.keypoint_predictor is not None
):
keypoint_proposals = [p["boxes"] for p in result]
if self.training:
# during training, only focus on positive boxes
num_images = len(proposals)
keypoint_proposals = []
pos_matched_idxs = []
assert matched_idxs is not None
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
keypoint_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes)
keypoint_features = self.keypoint_head(keypoint_features)
keypoint_logits = self.keypoint_predictor(keypoint_features)
loss_keypoint = {}
if self.training:
assert targets is not None
assert pos_matched_idxs is not None
gt_keypoints = [t["keypoints"] for t in targets]
rcnn_loss_keypoint = keypointrcnn_loss(
keypoint_logits, keypoint_proposals, gt_keypoints, pos_matched_idxs
)
loss_keypoint = {"loss_keypoint": rcnn_loss_keypoint}
else:
assert keypoint_logits is not None
assert keypoint_proposals is not None
keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals)
for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result):
r["keypoints"] = keypoint_prob
r["keypoints_scores"] = kps
losses.update(loss_keypoint)
return result, losses | fae859b8e986694d457e9d4933071eed76a49142 | 5,947 |
def capitalize(s):
"""capitalize(s) -> string
Return a copy of the string s with only its first character
capitalized.
"""
return s.capitalize() | 1c9b86e2bbffc486d624e7305f303d517a282b75 | 5,948 |
def S_tunnel_e0(self, mu, sig, Efl, Efr, Tl, Tr):
"""energy flux
Conduction band edge 0 at higher of the two
"""
a = mu-sig/2
b = mu+sig/2
kTl = sc.k*Tl
kTr = sc.k*Tr
Blr = (a/kTl+1)*np.exp(-a/kTl)-(b/kTl+1)*np.exp(-b/kTl)
Brl = (a/kTr+1)*np.exp(-a/kTr)-(b/kTr+1)*np.exp(-b/kTr)
Slr = kTl**3*Blr*np.exp(Efl/kTl)
Srl = -kTr**3*Brl*np.exp(Efr/kTr)
# Slr = kTl**3*Blr
# Srl = -kTr**3*Brl
ret = self._cS*(Slr+Srl)
return ret | 224b115d7205994e897bc74010fd4f24d562cc6c | 5,949 |
def to_camel_java(text, first_lower=True):
"""Returns the text in camelCase or CamelCase format for Java
"""
return to_camelcase(text, first_lower=first_lower,
reserved_keywords=JAVA_KEYWORDS, suffix="_") | c14b102502d7caa1dc51511ffd3c97f736a5c17b | 5,950 |
def rectangle_field(N_1, N_2, B_1, B_2, H, D, r_b):
"""
Build a list of boreholes in a rectangular bore field configuration.
Parameters
----------
N_1 : int
Number of borehole in the x direction.
N_2 : int
Number of borehole in the y direction.
B_1 : float
Distance (in meters) between adjacent boreholes in the x direction.
B_2 : float
Distance (in meters) between adjacent boreholes in the y direction.
H : float
Borehole length (in meters).
D : float
Borehole buried depth (in meters).
r_b : float
Borehole radius (in meters).
Returns
-------
boreField : list of Borehole objects
List of boreholes in the rectangular bore field.
Examples
--------
>>> boreField = gt.boreholes.rectangle_field(N_1=3, N_2=2, B_1=5., B_2=5.,
H=100., D=2.5, r_b=0.05)
The bore field is constructed line by line. For N_1=3 and N_2=2, the bore
field layout is as follows::
3 4 5
0 1 2
"""
borefield = []
for j in range(N_2):
for i in range(N_1):
borefield.append(Borehole(H, D, r_b, x=i*B_1, y=j*B_2))
return borefield | 955bc7f2bf3a79d790683e7589010bc81af98f85 | 5,951 |
def convertHunit(conc, from_unit='H/10^6 Si', to_unit='ppm H2O', phase='Fo90',
printout=True):
"""
Convert hydrogen concentrations to/from H/10^6 Si and ppm H2O.
Based on Table 3 of Denis et al. 2013
"""
if phase == 'Fo90':
H_to_1_ppm = 16.35
elif phase == 'opx':
H_to_1_ppm = 11.49
elif phase == 'cpx':
H_to_1_ppm = 11.61
else:
print('Valid options for phase are Fo90, opx, and cpx')
return
if from_unit == 'H/10^6 Si':
if to_unit == 'ppm H2O':
new_conc = conc / H_to_1_ppm
elif to_unit == 'per m3':
new_conc = conc * (1.0/308.67) * (1e30)
else:
print('only going to units "ppm H2O" and "per m3"')
return
elif from_unit == 'ppm H2O':
if to_unit == 'H/10^6 Si':
new_conc = conc * H_to_1_ppm
elif to_unit == 'per m3':
new_conc = (conc * H_to_1_ppm) * (1.0/308.67) * (1e30)
else:
print('only going to "H/10^6 Si" or "per m3"')
return
elif from_unit == 'per m3':
if to_unit == 'H/10^6 Si':
new_conc = conc / ((1.0/308.67) * (1e30))
elif to_unit == 'ppm H2O':
new_conc = (conc / ((1.0/308.67) * (1e30))) / H_to_1_ppm
else:
print('only going to "H/10^6 Si" or "ppm H2O"')
return
else:
print('Only going from H/10^6 Si, ppm H2O, and per m3 for now')
return
if printout is True:
output = ' '.join(('{:.2f}'.format(conc), from_unit, '=',
'{:.2f}'.format(new_conc), to_unit, 'for', phase))
print(output)
return new_conc | fdd0646a09f3a2c3a8cbbc02410103caa9e023dd | 5,952 |
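For example (assuming convertHunit is in scope), converting 100 H/10^6 Si in Fo90 olivine to ppm H2O divides by 16.35:

ppm = convertHunit(100.0, from_unit='H/10^6 Si', to_unit='ppm H2O', phase='Fo90')
# prints "100.00 H/10^6 Si = 6.12 ppm H2O for Fo90" and returns roughly 6.12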
import re
def countBasesInFasta(fastaFile):
"""
Given a fasta file, return a dict where the number of records and
the total number of bases are given by 'records' and 'bases' respectively.
"""
recordRE = re.compile(r'^>')
whiteSpaceRE = re.compile(r'\s+')
total_bases = 0
total_seqs = 0
with open(fastaFile) as f:
for line in f:
if recordRE.match(line):
total_seqs += 1
continue
total_bases += len(whiteSpaceRE.sub('', line))
return {'records': total_seqs, 'bases': total_bases} | 45eaa5b8d36b4bae6b97bb29fdead1efc0aed8c2 | 5,953 |
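A self-contained check with a throw-away two-record FASTA file (assuming countBasesInFasta is in scope):

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "toy.fasta")
with open(path, "w") as fh:
    fh.write(">seq1\nACGT\nACG\n>seq2\nTTTT\n")

print(countBasesInFasta(path))   # {'records': 2, 'bases': 11}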
import torchvision
import torchvision.transforms as transforms
import torch
def load_mnist_denoising(path_raw_dataset, batch_size=1, mu=0., sigma=0.6, deterministic=True):
"""
1. Get the MNIST dataset via PyTorch built-in APIs.
2. Wrap it with customized wrapper with additive Gaussian noise processor
3. Build PyTorch data loader objects.
:param path_raw_dataset:
:param batch_size:
:param mu:
:param sigma:
:param deterministic:
:return: dict of pytorch DataLoader objects.
{
'train':
(iterable) [noisy_image, (clean_image, noise)]
noisy_image shape: [batch, c, w, h]
clean_image shape: [batch, c, w, h]
noise shape: [batch, 1, c, w, h]
'val':
(iterable) [noisy_image, (clean_image, noise)]
noisy_image shape: [batch, c, w, h]
clean_image shape: [batch, c, w, h]
noise shape: [batch, 1, c, w, h]
}
"""
MNIST = P.data_processor_wrapper(torchvision.datasets.MNIST,
P.Processor_Denoising_AddGau(mu, sigma, deterministic, grayscale=True))
transform_input = transforms.Compose([
transforms.ToTensor(),
P.TransTo3Channels()
])
try:
data_train = MNIST(root=path_raw_dataset, train=True, download=False,
transform=transform_input)
except Exception:
torch_dataset_download_helper()
data_train = MNIST(root=path_raw_dataset, train=True, download=True,
transform=transform_input)
try:
data_val = MNIST(root=path_raw_dataset, train=False, download=False,
transform=transform_input)
except Exception:
torch_dataset_download_helper()
data_val = MNIST(root=path_raw_dataset, train=False, download=True,
transform=transform_input)
datasets = {'train': data_train, 'val': data_val}
data_loaders = {i: torch.utils.data.DataLoader(datasets[i], batch_size=batch_size, shuffle=False)
for i in ['train', 'val']}
return data_loaders | 4dbd365a0fa6d795714aa90828fe7bb2cbc9b99f | 5,954 |
def make_triplet_freqs(sentence, triplet_freqs):
"""
Split a sentence into word triplets (3-grams) and add them to triplet_freqs.
"""
# Janomeใงๅ่ชใซๅๅฒใใ
t = Tokenizer()
morphemes = [token.surface for token in t.tokenize(sentence)]
if len(morphemes) < 3:
return {}
# Iterate over consecutive word triplets
for i in range(len(morphemes) - 2):
triplet = tuple(morphemes[i:i+3])
triplet_freqs[triplet] += 1
# Add BEGIN marker
triplet = (BEGIN, morphemes[0], morphemes[1])
triplet_freqs[triplet] = 1
# Add END marker
triplet = (morphemes[-2], morphemes[-1], END)
triplet_freqs[triplet] = 1
return triplet_freqs | 97fc3affd841e148f58de487d171df61745d17a9 | 5,955 |
def test_train_val_split(patient_id,
sub_dataset_ids,
cv_fold_number):
""" if cv_fold_number == 1:
if patient_id in sub_dataset_ids[-5:]: return 'test'
elif patient_id in sub_dataset_ids[-7:-5]: return 'validation'
else: return 'train'
elif cv_fold_number == 2:
if patient_id in sub_dataset_ids[-10:-5]: return 'test'
elif patient_id in sub_dataset_ids[-12:-10]: return 'validation'
else: return 'train'
# used for accumulating results of tests on cv1 and cv2
if cv_fold_number == 3:
if patient_id in sub_dataset_ids[-10:]: return 'test'
elif patient_id in sub_dataset_ids[-12:-11]: return 'validation'
else: return 'train' """
if patient_id in [1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]:
return 'test'
elif patient_id == 36:
return 'validation'
else:
return 'train' | 129f3856875033505555241408577f8885c9c393 | 5,956 |
import os
import pandas as pd
def get_convertible_info():
"""
D:\Trade\TDX\cjzq_tdx\T0002\hq_cache\speckzzdata.txt
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\speckzzdata.txt')
columns = [
'exchange', 'code', 'stock_code', 'convert_price', 'current_interest', 'list_amount', 'call_price',
'redeem_price',
'convert_start', 'due_price', 'convert_end', 'convert_code', 'current_amount', 'list_date', 'convert_ratio(%)'
]
df = pd.read_csv(filename, names=columns)
df['exchange'] = df['exchange'].apply(lambda x: 'sse' if x else 'szse')
df[['code', 'stock_code']] = df[['code', 'stock_code']].applymap(lambda x: '{:0>6d}'.format(x))
df[['list_amount', 'current_amount']] = df[['list_amount', 'current_amount']] * 10000
return df | 82d7b9485095b504f3d3a39bf446ccf7bc170558 | 5,957 |
def test_striplog_colour_plot():
"""
Tests mpl image of striplog with the ladder option.
"""
legend = Legend.builtin('NSDOE')
imgfile = "tutorial/M-MG-70_14.3_135.9.png"
striplog = Striplog.from_image(imgfile, 14.3, 135.9, legend=legend)
for iv in striplog:
iv.data['porosity'] = iv.top.z/100
fig = striplog.plot(colour='porosity', aspect=3, return_fig=True)
return fig | a76f01a5b6255a0dfe39aca7cc3e352787457d17 | 5,958 |
import subprocess
import sys
from os.path import splitext
import pandas as pd
def fetch_data(
o_included: str,
flowcharts: dict,
o_metadata_file: str,
o_biom_file: str,
p_redbiom_context: str,
p_bloom_sequences: str,
p_reads_filter: int,
unique: bool,
update: bool,
dim: bool) -> pd.DataFrame:
"""
Parameters
----------
o_included : str
Path to output metadata for the included samples only.
flowcharts : dict
Steps of the workflow with samples counts (simpler representation).
o_metadata_file : str
[if --fetch] Path to the output metadata table file.
o_biom_file : str
[if --fetch] Path to the output biom table file.
p_redbiom_context : str
[if --fetch] Redbiom context for fetching 16S data from Qiita.
p_bloom_sequences : str
[if --fetch] Fasta file containing the sequences known
to bloom in fecal samples.
p_reads_filter : int
[if --fetch] Minimum number of reads per sample.
unique : bool
[if --fetch] Whether to keep a unique sample per host or not.
update : bool
[if --fetch] Update the sample names to remove Qiita-prep info.
dim : bool
[if --fetch] Whether to add the number of samples in the final
biom file name before extension or not.
Returns
-------
included : pd.DataFrame
Metadata for the included samples only.
"""
cmd = 'Xrbfetch'
cmd += ' -m %s' % o_included
if o_metadata_file:
o_meta = o_metadata_file
else:
o_meta = '%s_fetched.tsv' % splitext(o_included)[0]
cmd += ' -o %s' % o_meta
if o_biom_file:
cmd += ' -b %s' % o_biom_file
else:
cmd += ' -b %s_fetched.biom' % splitext(o_included)[0]
if p_bloom_sequences:
cmd += ' -s %s' % p_bloom_sequences
cmd += ' -r %s' % p_redbiom_context
cmd += ' -f %s' % p_reads_filter
if unique:
cmd += ' --unique'
if update:
cmd += ' --update'
if dim:
cmd += ' --dim'
cmd += ' --force'
cmd += ' --no-simple'
print('- fetch on redbiom:')
redbiom_fetching = subprocess.getoutput(cmd).split('\n')
print('Done.')
flowcharts['data'] = []
for step in redbiom_fetching:
step_line = step.strip()
if step_line.startswith('- Load biom table...'):
print('[fetch]', step_line)
n = step_line.split()[6]
flowcharts['data'].append(
['Fetch', n, 'redbiom', p_redbiom_context, None])
elif step_line.startswith('- Filter blooms...'):
print('[fetch]', step_line)
n = step_line.split()[5]
flowcharts['data'].append(
['Filter blooms', n, None, None, None])
elif step_line.startswith('- Get best samples from ambiguous'):
print('[fetch]', step_line)
n = step_line.split()[8]
flowcharts['data'].append(
['Solve redbiom ambiguous', n, 'most reads',
'...or... ', 'most features'])
elif step_line.startswith('- Filter biom for min'):
print('[fetch]', step_line)
f = step_line.split()[5]
n = step_line.split()[11]
flowcharts['data'].append(
['Filter reads', n, 'min %s' % f, None])
elif step_line.startswith('- Already one sample per host_subject_id'):
print('[fetch]', step_line)
n = step_line.split()[8]
flowcharts['data'].append(
['One per sample ID', n, None, None, None])
elif step_line.startswith('- Keep the best sample per host_subject_id'):
print('[fetch]', step_line)
n = step_line.split()[9]
flowcharts['data'].append(
['One per sample ID', n, None, None, None])
if 'Outputs:' in redbiom_fetching:
outs = redbiom_fetching[(redbiom_fetching.index('Outputs:') + 1):]
if len(outs):
return read_meta_pd(outs[0])
print('nothing fetched: check command:\n%s\nExiting...' % cmd)
sys.exit(1) | f7cb52b2dfc7c33038f448dfd8578a6e54d1d2fa | 5,959 |
def searchArtist(artistName, session=models.session):
"""Search for artist. Returns models.ArtistSearch"""
return models.ArtistSearch(artistName, session) | 4fd9e45b633285a9ee1817a84508749d1ba724e7 | 5,960 |
from collections import defaultdict
def _ddnone():
"""allow defaultdict to be pickled"""
return defaultdict(_none) | 9a050e08b0c47bc789f0238489c679d01a42c1ba | 5,961 |
from operator import and_
def apply_join(query: Select, table: Table, join_table: Table, join: TableJoin):
"""
Performs a inner or outer join between two tables on a given query object.
TODO: enable multiple joins
:param query: A SQLAlchemy select object.
:param table: The Table we are joining from.
:param join_table: The Table we are joining to.
:param join: The Join object describing how to join the tables.
:return: A SQLAlchemy select object modified to join two tables.
"""
error_msg = 'Invalid join, "{}" is not a column on table "{}"'
join_conditions = []
for column_pair in join.column_pairs:
from_col = table.columns.get(column_pair.from_column)
to_col = join_table.columns.get(column_pair.to_column)
if from_col is None:
raise ValueError(error_msg.format(column_pair.from_column, table.name))
if to_col is None:
raise ValueError(error_msg.format(column_pair.to_column, join_table.name))
join_conditions.append(from_col == to_col)
return query.select_from(table.join(join_table, onclause=and_(*join_conditions), isouter=join.outer_join)) | 1c5bfc7de3f1c7b9e17588e730085e5dc87d7c49 | 5,962 |
def filter_shapely(feature):
"""
feature1 = feature_extract(feature)
feature2 = filter_shapely(feature1)
"""
tmp = extract_Accumulation_entropy_list(feature)
tmp2=[]
for i in range(len(tmp)):
if i!=0:
tmp2.append(tmp[i]-tmp[i-1])
else:
tmp2.append(tmp[i])
return tmp2 | 54654130340a3485a7de9a3d5a51d3def8a01037 | 5,963 |
def stations_by_river(stations):
"""Returns a dictionary mapping river names (key)
to a list of stations (object)"""
rivers_stations_dict = {} # Create empty dictionary
for i in range(len(stations)): # Iterate through list of stations
# Data type checks
if type(stations[i]) is MonitoringStation:
pass # Checks if stations are correct class
else:
raise TypeError("ERROR: Station is not a MonitoringStation")
if type(stations[i].name) is str: # Checks if name is string
pass
else:
raise TypeError("ERROR: Station 'name' attribute is not a string")
if type(stations[i].river) is str: # Checks if river is string
pass
else:
raise TypeError("ERROR: Station 'river' attribute is not a string")
if not stations[i].river in rivers_stations_dict:
# Checks if river is not in dictionary
rivers_stations_dict[stations[i].river] = []
# Adds river to dictionary with blank list
if stations[i].name not in rivers_stations_dict[stations[i].river]:
rivers_stations_dict[stations[i].river].append(stations[i].name)
# Adds station name to object list
return rivers_stations_dict | d57bc06b60d6669bf6a10b7ad05363124f2312b5 | 5,964 |
def getCurrentProfile():
"""
Get the name of the current profile.
"""
return __createJSON("GetCurrentProfile", {}) | 6627d01348d566f0d079b8e7bcf04e35ad6ed0ba | 5,965 |
def get_params_from_request(req: web.Request) -> QueryParams:
"""
This function need for convert query string to filter parameters.
"""
page = int(req.rel_url.query.get('page', '1'))
cursor = req.rel_url.query.get('cursor')
sort = req.rel_url.query.get('sort')
sort_dir = req.rel_url.query.get('sortDir')
if sort and sort_dir == 'desc':
sort = f'-{sort}'
return QueryParams(
page=page,
cursor=int(cursor) if cursor else None,
order_by=sort,
) | b0deb4e5a1dc10fe82745e6c3c0869015424e2e0 | 5,966 |
def norm_mem_interval(pt):
"""Normalize membership in interval."""
return pt.on_prop(arg_conv(binop_conv(auto.auto_conv()))) | b50aa86d942fe1c2f35c6bcffae350042ff86090 | 5,967 |
import numpy as np
from matplotlib.figure import Figure
def create_figure():
"""
Creates a simple example figure.
"""
fig = Figure()
a = fig.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2 * np.pi * t)
a.plot(t, s)
return fig | 532a4eda745cb969f8ef60e66d6f63e761b8a5ff | 5,968 |
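One way to render the figure above without pyplot is to attach an Agg canvas and save it to disk (a sketch, assuming create_figure is in scope):

from matplotlib.backends.backend_agg import FigureCanvasAgg

fig = create_figure()
FigureCanvasAgg(fig)          # attach a canvas so the Figure can render off-screen
fig.savefig("sine.png", dpi=100)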
def rdf_reader(src):
"""rdf = rdf_reader(src)
src rdf filename
rdf The RDF mapping object"""
return RDF(*list(rdf_include(src))) | cf64ee6ed12a3e0d1667a537ac696918d26f80ba | 5,969 |
def draw_signalData(Nsamp=1, alpha=__alpha, beta=__beta, **kwargs):
"""
draw an SNR from the signal distribution
"""
return np.array([ncx2.rvs(__noise_df, nc) for nc in __draw_truncatedPareto(Nsamp, alpha=alpha, beta=beta)]) | 6dc320e2289c30a0e68696be71ded30066d7fa74 | 5,970 |
def choose_weighted_images_forced_distribution(num_images, images, nodes):
"""Returns a list of images to cache
Enforces the distribution of images to match the weighted distribution as
closely as possible. Factors in the current distribution of images cached
across nodes.
It is important to note that there may be circumstances which prevent this
function from attaining the desired ideal distribution, but the function
will always try its best to reach the desired distribution based on the
specified weights.
num_images - the number (integer) of images to choose to cache
images - a list of to ImageInputs consider for caching
nodes - a list of NodeInputs to use for determining which images
need to be cached the most
"""
named_distribution = _get_named_image_distribution(images, nodes)
# Take the difference of the desired distribution with the current
# one.
scaled_weights = _get_scaled_weights(
images, _get_scale_factor_for_caching_nodes(num_images, images, nodes))
distribution_difference = [
[image, (scaled_weights[image.name] - named_distribution[image.name])]
for image in images
]
def decrement_distribution(distribution_pair, diff_dict):
distribution_pair[1] -= 1
return _pick_images(
images, distribution_difference, num_images,
picker_func=lambda diff: max(diff, key=lambda pair: pair[1]),
distribution_mutator_func=decrement_distribution) | 8cf49fd376893be254d5075930475de9cedee004 | 5,971 |
def predict_lumbar_ankles_model(data):
"""Generate lumbar + 2 ankles model predictions for data.
Args:
data (dict): all data matrices/lists for a single subject.
Returns:
labels (dict): columns include 'probas' (from model) and 'true'
(ground truth). One row for each fold.
"""
RESULT_DIR = '../results/imus6_subjects7/sensors03_lumbar_ankles/'\
'iteration0/'
data = selectFeats(data, ['lumbar','ankle_r','ankle_l'])
test_dset = (data['X'], data['y'])
subject = str(int(data['subjectID']))
model = load_model_and_weights(subject, RESULT_DIR)
labels = make_predictions(model, test_dset)
return labels | 581a45a71bb17ebebf3a8ea63dbbfb898c6e3567 | 5,972 |
from functools import partial, wraps
def breakOnEnter(func=None, *, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function is called.
Parameters
----------
func : The function to wrap.
debugger : The debugger used when debug mode is entered. This can
be either the debugging module itself or a string containing
the name of the debugging module. Currently, pdb and ipdb are
supported.
"""
if func is None:
return partial(breakOnEnter, debugger=debugger)
debugger = import_(debugger)
@wraps(func)
def wrapper(*args, **kwargs):
return debugger.runcall(func, *args, **kwargs)
return wrapper | 58b1e965a563ca19c886eef6a623af84ae9ae29c | 5,973 |
def linear_search(iterable, item):
"""Returns the index of the item in the unsorted iterable.
Iterates through a collection, comparing each item to the target item, and
returns the index of the first item that is equal to the target item.
* O(n) time complexity
* O(1) space complexity
Args:
iterable: A collection that is iterable.
item: An object to search for.
Returns:
The index of the item in the sorted iterable, or -1 if not found.
Raises:
TypeError: If iterable is not iterable.
"""
try:
_ = iter(iterable)
except TypeError:
raise TypeError('\'{}\' object is not iterable'.format(
type(iterable).__name__))
for index, _item in enumerate(iterable):
if _item == item:
return index
return -1 | bdbd7e70cea79deef1375648bde61067df1d2221 | 5,974 |
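A few quick calls (assuming linear_search is in scope):

print(linear_search([4, 2, 7, 2], 7))   # 2
print(linear_search("hello", "l"))      # 2 (any iterable works, including strings)
print(linear_search([1, 2, 3], 9))      # -1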
def create_MD_tag(reference_seq, query_seq):
"""Create MD tag
Args:
reference_seq (str) : reference sequence of alignment
query_seq (str) : query bases of alignment
Returns:
md_tag(str) : md description of the alignment
"""
no_change = 0
md = []
for ref_base, query_base in zip(reference_seq, query_seq):
if ref_base.upper() == query_base:
no_change += 1
else:
if no_change > 0:
md.append(str(no_change))
md.append(ref_base)
no_change = 0
if no_change > 0:
md.append(str(no_change))
return ''.join(md) | 4b711521d00af132e8e29fe4fc44785b985c2607 | 5,975 |
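# Usage sketch for create_MD_tag above (illustrative sequences): one mismatch
# at the third base yields "2G1"; a fully matching, case-insensitive pair
# collapses to the run length "4".
assert create_MD_tag("ACGT", "ACCT") == "2G1"
assert create_MD_tag("acgt", "ACGT") == "4"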
import subprocess
import os
def get_diff(base, head=None):
"""Return a git diff between the base and head revision.
:type base: str
:type head: str | None
:rtype: list[str]
"""
if not head or head == 'HEAD':
        head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
cache = '/tmp/git-diff-cache-%s-%s.log' % (base, head)
if os.path.exists(cache):
with open(cache, 'r') as cache_fd:
lines = cache_fd.read().splitlines()
else:
        lines = subprocess.check_output(['git', 'diff', base, head]).decode().splitlines()
with open(cache, 'w') as cache_fd:
cache_fd.write('\n'.join(lines))
assert lines
return lines | a69fc80c03c2cf89ec47b510397bf60c8dc5732c | 5,976 |
import numpy as np
import pandas as pd
def split_last(dataframe, target_col, sort_col='date', cut=.9):
"""Splits the dataframe on sort_column at the given cut ratio, and splits
the target column
Args:
dataframe: dataframe to be cut
sort_col: column to be sorted on. Default='date'
cut: cut ratio for the train/eval sets
Returns:
X_train: dataframe of the first cut of the data set without the target
y_train: dataframe of the first cut of the data set only target values
X_eval: dataframe of the remaining slice of the data set without target
y_eval: dataframe of the remaining slice of the data set only targets
"""
    if sort_col is not None:
        dataframe = dataframe.sort_values(by=sort_col)
    cutoff = int(dataframe.shape[0] * cut)
    dataframe = dataframe.reset_index(drop=True)
    first_df = dataframe.iloc[:cutoff]
    last_df = dataframe.iloc[cutoff:]
X_train = first_df.drop(columns=[target_col])
y_train = np.array(first_df[target_col]).ravel()
X_eval = last_df.drop(columns=[target_col])
y_eval = np.array(last_df[target_col]).ravel()
return X_train, y_train, X_eval, y_eval | 090144fa9c68f8ffc9e9e7c2e9c8427f0aff862d | 5,977 |
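# Usage sketch for split_last above (assumes the corrected version and an
# illustrative frame): a 10-row frame with a 'date' sort column and a 'y'
# target, split 80/20.
import pandas as pd
demo = pd.DataFrame({
    "date": pd.date_range("2021-01-01", periods=10),
    "x": range(10),
    "y": range(10),
})
X_train, y_train, X_eval, y_eval = split_last(demo, target_col="y", cut=0.8)
print(X_train.shape, X_eval.shape)  # (8, 2) (2, 2)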
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings() | 2fc8c4d12467ab3c0b86201271f42b7d22130b82 | 5,978 |
import csv
def readCSV(associated_ipaddr, ipaddr, timestamp):
"""
Method that extracts observations from a CSV file.
Parameters:
associated_ipaddr (str): The name of the column that specifies IP addresses of VPN clients
ipaddr (str): The name of the column that specifies IP addresses of users on the public internet
timestamp (str): The name of the column that specifies the observation creation time
Returns:
observations (list): A list of observation dictionaries
"""
observations = []
with open(CSV_FILE, "rt", encoding="ascii") as f:
reader = csv.reader(f)
header = next(reader, None)
for row in reader:
observations.append(
{
"associated_ipaddr": row[header.index(associated_ipaddr)],
"ipaddr": row[header.index(ipaddr)],
"timestamp": translateTime(row[header.index(timestamp)]),
}
)
return observations | 77594e98b83cd5d49bd8a70b28b54cab92dcadeb | 5,979 |
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
def interp2d(x, y, z, outshape, verbose=True, doplot=True):
"""
Parameters
----------
x, y : int
X and Y indices of `z`.
z : float
Values for given `x` and `y`.
outshape : tuple of int
Shape of 2D output array.
verbose : bool, optional
Print info to screen.
doplot : bool, optional
Plot results.
Returns
-------
im : float array
2-D array of interpolated data.
"""
# Print the data to screen for checking
if verbose:
        print('DATA USED FOR INTERPOLATION:')
        for i, (xx, yy, zz) in enumerate(zip(x, y, z), start=1):
            print('{}: {} {} {}'.format(i, xx, yy, zz))
# Perform 2D interpolation
func = interpolate.interpolate.interp2d(x, y, z)
im = func(np.mgrid[:outshape[1]], np.mgrid[:outshape[0]])
if doplot:
# Get min/max to use same colorbar on for base and overlay
pmin = im.min()
pmax = im.max()
fig, ax = plt.subplots()
# Show interpolated 2D image
p = ax.imshow(im, vmin=pmin, vmax=pmax)
# Overlay data points used for interpolation
ax.scatter(x, y, s=100, c=z, vmin=pmin, vmax=pmax, marker='s')
# Display colorbar.
# Shrink to make it same width as display.
c = fig.colorbar(p, orientation='horizontal', shrink=0.7)
c.set_label('Pixel value')
# Plot labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Interpolated image')
plt.draw()
return im | 05558e413139a0ad71a4240e3f44c4bb9019c314 | 5,980 |
import shlex
import subprocess
from typing import Text
def run_cmd(cmd: Text, split: bool = True, shell=False, verbose: bool = True):
"""Run a system command and print output."""
print(f'CMD: {cmd}')
cmd = shlex.split(cmd) if split else [cmd]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell)
while True:
output = process.stdout.readline().decode('utf-8').strip()
if output == '' and process.poll() is not None:
break
if output and verbose:
print(output)
return_code = process.poll()
if return_code != 0:
print(f'\tERROR ({return_code}) running command!')
return return_code | 071df0f5b185249128b3c52a48aa1787d1299fe9 | 5,981 |
import numpy as np
def _nms_boxes(detections, nms_threshold):
"""Apply the Non-Maximum Suppression (NMS) algorithm on the bounding
boxes with their confidence scores and return an array with the
indexes of the bounding boxes we want to keep.
# Args
detections: Nx7 numpy arrays of
[[x, y, w, h, box_confidence, class_id, class_prob],
......]
"""
x_coord = detections[:, 0]
y_coord = detections[:, 1]
width = detections[:, 2]
height = detections[:, 3]
box_confidences = detections[:, 4] * detections[:, 6]
areas = width * height
ordered = box_confidences.argsort()[::-1]
keep = list()
while ordered.size > 0:
# Index of the current element:
i = ordered[0]
keep.append(i)
xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])
yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])
xx2 = np.minimum(x_coord[i] + width[i], x_coord[ordered[1:]] + width[ordered[1:]])
yy2 = np.minimum(y_coord[i] + height[i], y_coord[ordered[1:]] + height[ordered[1:]])
width1 = np.maximum(0.0, xx2 - xx1 + 1)
height1 = np.maximum(0.0, yy2 - yy1 + 1)
intersection = width1 * height1
union = (areas[i] + areas[ordered[1:]] - intersection)
iou = intersection / union
indexes = np.where(iou <= nms_threshold)[0]
ordered = ordered[indexes + 1]
keep = np.array(keep)
return keep | 9d3ad16396f1e94e4ac8efe1e73e8f06b529ff0f | 5,982 |
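# Usage sketch for _nms_boxes above: two heavily overlapping boxes and one
# distant box (columns: x, y, w, h, box_confidence, class_id, class_prob).
# The lower-confidence overlapping box is suppressed at IoU threshold 0.5.
import numpy as np
dets = np.array([
    [0.0, 0.0, 10.0, 10.0, 0.9, 0, 1.0],
    [1.0, 1.0, 10.0, 10.0, 0.8, 0, 1.0],
    [50.0, 50.0, 10.0, 10.0, 0.7, 0, 1.0],
])
print(_nms_boxes(dets, nms_threshold=0.5))  # [0 2]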
def dht_get_key(data_key):
"""
Given a key (a hash of data), go fetch the data.
"""
dht_client = get_dht_client()
ret = dht_client.get(data_key)
if ret is not None:
        if isinstance(ret, list):
            ret = ret[0]
        if isinstance(ret, dict) and "value" in ret:
            ret = ret["value"]
else:
raise Exception("No data returned from %s" % data_key)
return ret | 0c8680996e21b7dcc02cd4b7d81f3fa500b02076 | 5,983 |
import pandas as pd
def get_dataframe_from_table(table_name, con):
"""
put table into DataFrame
"""
df = pd.read_sql_table(table_name, con)
return df | cdf94277c2f4e3acdd22b87de7cd9d0fee63b24c | 5,984 |
from typing import List
from typing import Dict
import requests
def _find_links_in_headers(*, headers, target_headers: List[str]) -> Dict[str, Dict[str, str]]:
"""Return a dictionary { rel: { url: 'url', mime_type: 'mime_type' } } containing the target headers."""
found: Dict[str, Dict[str, str]] = {}
links = headers.get("link")
if links:
# [{'url': 'https://micropub.jamesg.blog/micropub', 'rel': 'micropub'} ]
parsed_link_headers: List[Dict[str, str]] = requests.utils.parse_header_links(links)
else:
return found
for header in parsed_link_headers:
url = header.get("url", "")
rel = header.get("rel", "")
mime_type = header.get("type", "")
if _is_http_url(url) and rel in target_headers:
found[rel] = {
"url": url,
"mime_type": mime_type,
}
# Add check for x-pingback header
if "x-pingback" in target_headers:
pingback_url = headers.get("x-pingback")
if _is_http_url(pingback_url):
# assign as "pingback" key in dictionary
found["pingback"] = {
"url": url,
"mime_type": "",
}
return found | ee23c9c7ca2633d11ea33ac2695a46eca4188af5 | 5,985 |
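# Usage sketch for _find_links_in_headers above (assumes the module's
# _is_http_url helper accepts plain https URLs): pull a micropub endpoint
# and a pingback endpoint out of response headers.
headers = {
    "link": '<https://example.com/micropub>; rel="micropub"',
    "x-pingback": "https://example.com/xmlrpc.php",
}
links = _find_links_in_headers(
    headers=headers, target_headers=["micropub", "x-pingback"]
)
print(links["micropub"]["url"])  # https://example.com/micropub
print(links["pingback"]["url"])  # https://example.com/xmlrpc.php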
import re
def calc_word_frequency(my_string, my_word):
"""Calculate the number of occurrences of a given word in a given string.
Args:
my_string (str): String to search
my_word (str): The word to search for
Returns:
int: The number of occurrences of the given word in the given string.
"""
# Remove all non alphanumeric characters from the string
filtered_string = re.sub(r'[^A-Za-z0-9 ]+', '', my_string)
# Return the number of occurrences of my_word in the filtered string
return filtered_string.split().count(my_word) | 15ff723dd2ff089fb12cccb38283f1f75e37079d | 5,986 |
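# Usage sketch for calc_word_frequency above: punctuation is stripped before
# counting exact, case-sensitive whole-word matches.
text = "The cat sat; the cat, the CAT!"
print(calc_word_frequency(text, "cat"))  # 2 ('CAT' differs in case)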
from typing import Counter
def asyn_lpa_communities(G, weight=None, seed=None):
"""Returns communities in `G` as detected by asynchronous label
propagation.
The asynchronous label propagation algorithm is described in
[1]_. The algorithm is probabilistic and the found communities may
vary on different executions.
The algorithm proceeds as follows. After initializing each node with
a unique label, the algorithm repeatedly sets the label of a node to
be the label that appears most frequently among that nodes
neighbors. The algorithm halts when each node has the label that
appears most frequently among its neighbors. The algorithm is
asynchronous because each node is updated without waiting for
updates on the remaining nodes.
This generalized version of the algorithm in [1]_ accepts edge
weights.
Parameters
----------
G : Graph
weight : string
The edge attribute representing the weight of an edge.
If None, each edge is assumed to have weight one. In this
algorithm, the weight of an edge is used in determining the
frequency with which a label appears among the neighbors of a
node: a higher weight means the label appears more often.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
communities : iterable
Iterable of communities given as sets of nodes.
Notes
------
Edge weight attributes must be numerical.
References
----------
    .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near
linear time algorithm to detect community structures in large-scale
networks." Physical Review E 76.3 (2007): 036106.
"""
labels = {n: i for i, n in enumerate(G)}
cont = True
while cont:
cont = False
nodes = list(G)
seed.shuffle(nodes)
# Calculate the label for each node
for node in nodes:
if len(G[node]) < 1:
continue
# Get label frequencies. Depending on the order they are processed
            # in, some nodes will be in t and others in t-1, making the
# algorithm asynchronous.
label_freq = Counter()
for v in G[node]:
label_freq.update({labels[v]: G.edges[v, node][weight]
if weight else 1})
            # Choose the label with the highest frequency. If more than 1 label
            # has the highest frequency choose one randomly.
max_freq = max(label_freq.values())
best_labels = [label for label, freq in label_freq.items()
if freq == max_freq]
new_label = seed.choice(best_labels)
labels[node] = new_label
# Continue until all nodes have a label that is better than other
# neighbour labels (only one label has max_freq for each node).
cont = cont or len(best_labels) > 1
# TODO In Python 3.3 or later, this should be `yield from ...`.
return iter(groups(labels).values()) | d6696f9347684dee6a81c8dc7c240a3c200ec629 | 5,987 |
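# Usage sketch: the snippet above mirrors NetworkX's implementation and relies
# on NetworkX-internal helpers (e.g. `groups`), so the shipped function is the
# easiest way to exercise the same algorithm on a toy graph.
import networkx as nx
G = nx.karate_club_graph()
communities = list(nx.algorithms.community.asyn_lpa_communities(G, seed=42))
print(len(communities), [len(c) for c in communities])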
def _make_warmstart_dict_env():
"""Warm-start VecNormalize by stepping through BitFlippingEnv"""
venv = DummyVecEnv([make_dict_env])
venv = VecNormalize(venv)
venv.reset()
venv.get_original_obs()
for _ in range(100):
actions = [venv.action_space.sample()]
venv.step(actions)
return venv | 67e0ee3e8440c24a08e306afbb9891dee64dd11d | 5,988 |
def record_attendance(lesson_id):
"""
Record attendance for a lesson.
"""
    # Get the UserLessonAssociation for the current user and
# the given lesson id. (So we can also display attendance etc.)
lesson = Lesson.query.filter(Lesson.lesson_id == lesson_id).first()
# Ensure the lesson id/association object is found.
if not lesson:
abort(404)
record_single_attendance_form = RecordSingleAttendanceForm()
if request.method == 'POST' and record_single_attendance_form.validate_on_submit():
assoc = UserLessonAssociation.query.filter(
UserLessonAssociation.lesson_id == lesson_id
).filter(
UserLessonAssociation.user_id == int(record_single_attendance_form.user_id.data)
).first()
if assoc:
assoc.attendance_code = record_single_attendance_form.attendance_code.data
flash("Successfully updated lesson attendance.")
else:
abort(500)
        # We only want to send updates if they were late or not there.
if assoc.attendance_code == 'L' or assoc.attendance_code == 'N':
# Send an email update.
html = 'Attendance for your lesson on: ' + assoc.lesson.get_lesson_date() \
+ ' has been updated. Your attendance is now recorded as: ' + \
assoc.get_lesson_attendance_str()
# Send a lesson update.
send_lesson_update(
assoc.user, html,
url_for(
'student.view_lesson',
lesson_id=lesson_id,
_external=True
),
parent=True
)
if check_attendance_complete(lesson):
# The attendance is complete.
lesson.update_lesson_details(attendance_recorded=True)
else:
lesson.update_lesson_details(attendance_recorded=False)
# Save Changes
db.session.commit()
# Refresh
return redirect(url_for('staff.record_attendance', lesson_id=lesson_id))
# Render the view lesson template and pass in the association and the lesson object.
return render_template(
'staff/record_attendance.html', lesson=lesson,
record_single_attendance_form=record_single_attendance_form
) | 237fb1df5eaf1f1b7d9555ca636971318f23c360 | 5,989 |
def ts_to_datestr(ts, fmt="%Y-%m-%d %H:%M"):
"""ๅฏ่ฏปๆง"""
return ts_to_datetime(ts).strftime(fmt) | 29b180c0d569768b173afb960d9cb09e86519741 | 5,990 |
from tensorflow.python.ops import array_ops, math_ops
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
# TODO(b/27786104): The cast to complex could be removed once arithmetic
# supports mixtures of complex64 and real values.
return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
math_ops.sign(op.inputs[0])) | f150a36bb852e4c722771220d4ed976875fce0ef | 5,991 |
import numpy as np
def so3_rotate(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
rotation_angle_A = np.random.uniform() * 2 * np.pi
rotation_angle_B = np.random.uniform() * 2 * np.pi
rotation_angle_C = np.random.uniform() * 2 * np.pi
cosval_A = np.cos(rotation_angle_A)
sinval_A = np.sin(rotation_angle_A)
cosval_B = np.cos(rotation_angle_B)
sinval_B = np.sin(rotation_angle_B)
cosval_C = np.cos(rotation_angle_C)
sinval_C = np.sin(rotation_angle_C)
rotation_matrix = np.array([[cosval_B*cosval_C, -cosval_B*sinval_C, sinval_B],
[sinval_A*sinval_B*cosval_C+cosval_A*sinval_C, -sinval_A*sinval_B*sinval_C+cosval_A*cosval_C, -sinval_A*cosval_B],
[-cosval_A*sinval_B*cosval_C+sinval_A*sinval_C, cosval_A*sinval_B*sinval_C+sinval_A*cosval_C, cosval_A*cosval_B]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data | 84c184c920833bf2037b0f4181e9f25bcf6fd5ce | 5,992 |
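# Usage sketch for so3_rotate above: a random SO(3) rotation keeps the batch
# shape and the per-point distances to the origin (values are illustrative).
import numpy as np
np.random.seed(0)
cloud = np.random.randn(2, 128, 3).astype(np.float32)
rotated = so3_rotate(cloud)
print(rotated.shape)  # (2, 128, 3)
print(np.allclose(np.linalg.norm(cloud[0], axis=1),
                  np.linalg.norm(rotated[0], axis=1), atol=1e-4))  # True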
import hashlib
def intmd5(source: str, nbytes=4) -> int:
"""
Generate a predictive random integer of nbytes*8 bits based on a source string.
:param source:
seed string to generate random integer.
:param nbytes:
size of the integer.
"""
hashobj = hashlib.md5(source.encode())
return int.from_bytes(hashobj.digest()[:nbytes], byteorder="big", signed=False) | c03eb99a67af00a4a081423ecca3a724111514e1 | 5,993 |
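# Usage sketch for intmd5 above: the result is deterministic for a given
# source string and bounded by 2 ** (8 * nbytes).
print(intmd5("example-seed"))  # same integer on every run
print(intmd5("example-seed", nbytes=2) < 2 ** 16)  # True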
import numpy as np
def trisolve(a, b, c, y, inplace=False):
"""
The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems
of equations:
a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}
in matrix form:
Mx = y
TDMA is O(n), whereas standard Gaussian elimination is O(n^3).
Arguments:
-----------
a: (n - 1,) vector
the lower diagonal of M
b: (n,) vector
the main diagonal of M
c: (n - 1,) vector
the upper diagonal of M
y: (n,) vector
the result of Mx
inplace:
if True, and if b and y are both float64 vectors, they will be
modified in place (may be faster)
Returns:
-----------
x: (n,) vector
the solution to Mx = y
References:
-----------
http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
"""
if (a.shape[0] != c.shape[0] or a.shape[0] >= b.shape[0]
or b.shape[0] != y.shape[0]):
raise ValueError('Invalid diagonal shapes')
yshape_in = y.shape
if y.ndim == 1:
# needs to be (ldb, nrhs)
y = y[:, None]
rtype = np.result_type(a, b, c, y)
if not inplace:
# force a copy
a = np.array(a, dtype=rtype, copy=True, order='C')
b = np.array(b, dtype=rtype, copy=True, order='C')
c = np.array(c, dtype=rtype, copy=True, order='C')
y = np.array(y, dtype=rtype, copy=True, order='C')
# this may also force copies if arrays have inconsistent types / incorrect
# order
a, b, c, y = (np.array(v, dtype=rtype, copy=False, order='C')
for v in (a, b, c, y))
# y will now be modified in place to give the result
if rtype == np.float32:
_fnndeconv.TDMAs_lapacke(a, b, c, y)
elif rtype == np.float64:
_fnndeconv.TDMAd_lapacke(a, b, c, y)
else:
raise ValueError('Unsupported result type: %s' %rtype)
return y.reshape(yshape_in) | ead814b1025e8458f7e1eabeecf3eb89cb9edd5d | 5,994 |
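# Usage sketch for trisolve above (requires the compiled _fnndeconv extension
# it wraps): solve a small diagonally dominant system and verify against the
# dense matrix product.
import numpy as np
n = 5
a = np.full(n - 1, -1.0)  # lower diagonal
b = np.full(n, 4.0)       # main diagonal
c = np.full(n - 1, -1.0)  # upper diagonal
y = np.arange(1.0, n + 1)
M = np.diag(b) + np.diag(a, -1) + np.diag(c, 1)
x = trisolve(a, b, c, y)
print(np.allclose(M @ x, y))  # True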
import pandas as pd
def calc_mean_pred(df: pd.DataFrame):
"""
Make a prediction based on the average of the predictions of phones
in the same collection.
from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
"""
lerp_df = make_lerp_data(df=df)
add_lerp = pd.concat([df, lerp_df])
# each time step == only one row, average over all phone latDeg,
# lanDeg at each time step
# eg. mean(original Deg Pixel4 and interpolated Deg 4XLModded with `make_lerp_data`)
mean_pred_result = (
add_lerp.groupby(["collectionName", "millisSinceGpsEpoch"])[
["latDeg", "lngDeg"]
]
.mean()
.reset_index()
)
base_cols = ["collectionName", "phoneName", "phone", "millisSinceGpsEpoch"]
try:
mean_pred_df = df[base_cols + ["latDeg_gt", "lngDeg_gt", "speedMps"]].copy()
except Exception:
mean_pred_df = df[base_cols].copy()
mean_pred_df = mean_pred_df.merge(
mean_pred_result[["collectionName", "millisSinceGpsEpoch", "latDeg", "lngDeg"]],
on=["collectionName", "millisSinceGpsEpoch"],
how="left",
)
return mean_pred_df | a4f6cdb0d5efb72cd6b503a8eb3a0f4b13cee0bf | 5,995 |
def get_meals(v2_response, venue_id):
"""
Extract meals into old format from a DiningV2 JSON response
"""
result_data = v2_response["result_data"]
meals = []
day_parts = result_data["days"][0]["cafes"][venue_id]["dayparts"][0]
for meal in day_parts:
stations = []
for station in meal["stations"]:
items = []
for item_id in station["items"]:
item = result_data["items"][item_id]
new_item = {}
new_item["txtTitle"] = item["label"]
new_item["txtPrice"] = ""
new_item["txtNutritionInfo"] = ""
new_item["txtDescription"] = item["description"]
new_item["tblSide"] = ""
new_item["tblFarmToFork"] = ""
attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]]
if len(attrs) == 1:
new_item["tblAttributes"] = {"txtAttribute": attrs[0]}
elif len(attrs) > 1:
new_item["tblAttributes"] = {"txtAttribute": attrs}
else:
new_item["tblAttributes"] = ""
if isinstance(item["options"], list):
item["options"] = {}
if "values" in item["options"]:
for side in item["options"]["values"]:
new_item["tblSide"] = {"txtSideName": side["label"]}
items.append(new_item)
stations.append({"tblItem": items, "txtStationDescription": station["label"]})
meals.append({"tblStation": stations, "txtDayPartDescription": meal["label"]})
return meals | 9d27d225a39248690529167f7ff18777a086bcc6 | 5,996 |
async def async_setup(hass, config_entry):
""" Disallow configuration via YAML """
return True | 759cc705a82a0f9ff9d4d43cb14d641d7e552aaa | 5,997 |
import numpy as np
import matplotlib.pyplot as plt
def blend(im1, im2, mask):
"""
Blends and shows the given images according to mask
:param im1: first image
:param im2: second image
:param mask: binary mask
:return: result blend
"""
res = []
for i in range(3):
res.append(pyramid_blending(im1[:, :, i], im2[:, :, i], mask, 7, 5, 5))
res = np.dstack(res)
fig, a = plt.subplots(nrows=2, ncols=2)
a[0][0].imshow(im1, cmap='gray')
a[0][1].imshow(im2, cmap='gray')
a[1][0].imshow(mask, cmap='gray')
a[1][1].imshow(res, cmap='gray')
plt.show()
return res | 4b4a635d1f44ced411b9dfe2037b0f42805f38b2 | 5,998 |
import exifread
def parse(fileName):
"""
Pull the EXIf info from a photo and sanitize it so for sending as JSON
by converting values to strings.
"""
f = open(fileName, 'rb')
exif = exifread.process_file(f, details=False)
parsed = {}
    for key, value in exif.items():
parsed[key] = str(value)
return parsed | 3f5aca5b38dd7f3b3a9defae1fc5f645e255a191 | 5,999 |