content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def dustSurfaceDensitySingle(R, Rin, Sig0, p):
"""
Calculates the dust surface density (Sigma d) from single power law.
"""
return Sig0 * pow(R / Rin, -p)
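# Illustrative usage sketch (not from the original source): evaluating the single
# power law at twice the inner radius with made-up values Sig0=100, p=1.
sigma = dustSurfaceDensitySingle(2.0, 1.0, 100.0, 1.0)  # 100 * (2/1)**-1
assert abs(sigma - 50.0) < 1e-12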
|
441466f163a7b968cf193e503d43a1b014be7c5d
| 32,136 |
def rightToPurchase(
symbol="", refid="", token="", version="", filter="", **timeseries_kwargs
):
"""Right to purchase up-to-date and detailed information on all new announcements, as well as 12+ years of historical records.
Updated at 5am, 10am, 8pm UTC daily
https://iexcloud.io/docs/api/#right-to-purchase
Args:
symbol (str): Symbol to look up
refid (str): Optional. Id that matches the refid field returned in the response object. This allows you to pull a specific event for a symbol.
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
Supports all kwargs from `pyEX.stocks.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="advanced_right_to_purchase",
key=symbol,
subkey=refid,
token=token,
version=version,
filter=filter,
**timeseries_kwargs
)
|
8897902c7b729642cdd5e89658f62b70bccf2133
| 32,137 |
def compute_edits(old, new):
"""Compute the in-place edits needed to convert from old to new
Returns a list ``[(index_1,change_1), (index_2,change_2)...]``
where ``index_i`` is an offset into old, and ``change_i`` is the
new bytes to replace the old ones with.
For example, calling ``compute_edits("abcdef", "qbcdzw")`` will return
``[(0, "q"), (4, "zw")]``.
That is, the update should be performed as (abusing notation):
``new[index:index+len(change)] = change``
:param str old: The old data
:param str new: The new data
:returns: A list of tuples (index_i, change_i)
"""
deltas = []
delta = None
for index, (n, o) in enumerate(zip(new, old)):
if n == o:
if delta is not None:
deltas.append(delta)
delta = None
else:
if delta is None:
delta = (index, [])
delta[1].append(n)
if delta is not None:
deltas.append(delta)
return [(i, "".join(x)) for i, x in deltas]
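# Usage sketch added for illustration (not part of the original source); it simply
# reproduces the example given in the docstring.
edits = compute_edits("abcdef", "qbcdzw")
assert edits == [(0, "q"), (4, "zw")]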
|
f729addf84207f526e27d67932bb5300ced24b54
| 32,138 |
def gpx_to_lat_lon_list(filename):
"""
Summary: takes a .gpx file and turns the latitude and longitudes into a list of tuples
Returns: list of tuples (latitude, longitude).
"""
gpx_file = open(filename, "r")
gpx = gpxpy.parse(gpx_file)
latlonlist = []
if len(gpx.tracks) > 0:
print("tracks")
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
latlonlist.append((point.latitude, point.longitude))
elif len(gpx.routes) > 0:
print("routes")
for route in gpx.routes:
for point in route.points:
latlonlist.append((point.latitude, point.longitude))
else:
print("sorry mate, didn't care enough to implement this")
return latlonlist
|
4d413a5894a30bb176a103b8a3933685490f30fe
| 32,139 |
def pre_order(size):
"""List in pre order of integers ranging from 0 to size in a balanced
binary tree.
"""
interval_list = [None] * size
interval_list[0] = (0, size)
tail = 1
for head in range(size):
start, end = interval_list[head]
mid = (start + end) // 2
if mid > start:
interval_list[tail] = (start, mid)
tail += 1
if mid + 1 < end:
interval_list[tail] = (mid + 1, end)
tail += 1
interval_list[head] = mid
return interval_list
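# Illustrative check (not from the original source): for size=7 the midpoint of the
# whole range comes first, then the midpoints of the two halves, and so on.
assert pre_order(7) == [3, 1, 5, 0, 2, 4, 6]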
|
45ab688c627c19cd0b9c1200830a91b064d46bda
| 32,140 |
def LockPrefix():
"""Returns the lock prefix as an operand set."""
return set([Operands(disasms=('lock',))])
|
d4f84027494ad176efcb8c01f14876474aaca57f
| 32,141 |
def distance(pt, pts):
"""Distances of one point `pt` to a set of points `pts`.
"""
return np.sqrt((pts[:,0] - pt[0])**2 + (pts[:,1] - pt[1])**2)
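# Usage sketch (not from the original source); the function assumes numpy is
# available as np, so we import it here.
import numpy as np
pts = np.array([[3.0, 4.0], [0.0, 1.0]])
assert np.allclose(distance((0.0, 0.0), pts), [5.0, 1.0])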
|
06512472ac6c0e58182ad58190c82fa619d66d40
| 32,142 |
def prepare_rw_output_stream(output):
"""
Prepare an output stream that supports both reading and writing.
Intended to be used for writing & updating signed files:
when producing a signature, we render the PDF to a byte buffer with
placeholder values for the signature data, or straight to the provided
output stream if possible.
More precisely: this function will return the original output stream
if it is writable, readable and seekable.
If the ``output`` parameter is ``None``, not readable or not seekable,
this function will return a :class:`.BytesIO` instance instead.
If the ``output`` parameter is not ``None`` and not writable,
:class:`.IOError` will be raised.
:param output:
A writable file-like object, or ``None``.
:return:
A file-like object that supports reading, writing and seeking.
"""
if output is None:
output = BytesIO()
else:
if not assert_writable_and_random_access(output):
output = BytesIO()
return output
|
af1afe87e5de12cad9eb72b93da069327c1fffb5
| 32,143 |
from pathlib import Path
def add_references(md, tmp_dir, args):
"""
Remember that this function is run for main, review, and editor.
"""
citations_to_do = tmp_dir / 'citations.json'
biblio = Path(args.library).with_suffix('.json')
_prepare_node_input(md, citations_to_do)
_check_citation_keys(citations_to_do, biblio)
_process_node(tmp_dir, biblio, args)
md = _read_node_output(md, tmp_dir)
return md
|
5c4319720a809c9e6543ef078598b7a3539c3492
| 32,144 |
from pandas import Timestamp
def timestamp_now() -> Timestamp:
"""Returns a pandas timezone (UTC) aware Timestamp for the current time.
Returns:
pandas.Timestamp: Timestamp at current time
"""
return timestamp_tzaware(Timestamp.now())
|
545b0cb72691d3db127ccfc847295a4bc4902004
| 32,146 |
def readFlat4D(fn,interp=None):
"""
Load in data from 4D measurement of flat mirror.
Scale to microns, remove misalignments,
strip NaNs.
Distortion is bump positive looking at surface from 4D.
Imshow will present distortion in proper orientation as if
viewing the surface.
"""
#Get xpix value in mm
l = getline(fn,9)
dx = float(l.split()[1])*1000.
#Remove NaNs and rescale
d = np.genfromtxt(fn,skip_header=12,delimiter=',')
d = man.stripnans(d)
d = d *.6328
d = d - np.nanmean(d)
d = np.fliplr(d)
#Interpolate over NaNs
if interp is not None:
d = man.nearestNaN(d,method=interp)
return d,dx
|
8443ab4943bb571d1ead1f8f4342efec8e426139
| 32,147 |
def inner_product(D1, D2):
"""
Take the inner product of the frequency maps.
"""
result = 0.
for key in D1:
if key in D2:
result += D1[key] * D2[key]
return result
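# Usage sketch (not from the original source): only keys present in both frequency
# maps contribute to the inner product.
assert inner_product({"a": 1.0, "b": 2.0}, {"b": 3.0, "c": 4.0}) == 6.0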
|
95efb9f63d6a379e1c5f7c8f6ad4bfd4061e2032
| 32,148 |
def list_launch_daemons():
"""
Return an array of the files that are present in /Library/LaunchDaemons/
and /System/Library/LaunchDaemons/
"""
files = list_files_in_dir("/Library/LaunchDaemons/")
files += list_files_in_dir("/System/Library/LaunchDaemons/")
return files
|
8e1f0ab1bb78a9121f5c00f032a5c8dc089f39b0
| 32,149 |
import struct
def us_varchar_encode(text):
"""
encode with utf-16-le
UShort *Varchar
:param str text:
:return:
"""
if not text:
return b'\x00\x00'
length = len(text)
return struct.pack('<H', length) + text.encode('utf-16-le')
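# Usage sketch (not from the original source), assuming `text` is a str: the payload
# is the character count as a little-endian unsigned short, then the UTF-16-LE bytes.
assert us_varchar_encode("ab") == b"\x02\x00a\x00b\x00"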
|
07b232cd83e023d770fc4e7cd63250ad746aae19
| 32,151 |
from typing import List
import numpy as np
def graph_to_diagonal_h(n: int, nodes: List[int]) -> np.ndarray:
"""Construct diag(H)."""
h = [0.0] * 2**n
for node in nodes:
diag = tensor_diag(n, node[0], node[1], node[2])
for idx, val in enumerate(diag):
h[idx] += val
return np.asarray(h)
|
5c73d4b4a98465f3f03d9a423f867479b48da8fe
| 32,152 |
def condensational_heating(dQ2):
"""
Args:
dQ2: rate of change in moisture in kg/kg/s, negative corresponds
to condensation
Returns:
heating rate in degK/s
"""
return tf.math.scalar_mul(tf.constant(-LV / CPD, dtype=dQ2.dtype), dQ2)
|
55d5ec36bf1f4a217e239e35fb95e14060b07fb8
| 32,153 |
def collect_stats(cube, store, datasets=None):
"""
Collect statistics for given cube.
Parameters
----------
cube: Cube
Cube specification.
store: simplekv.KeyValueStore
KV store that preserves the cube.
datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
Datasets to query, must all be part of the cube. May be either the result of :meth:`discover_datasets`, a list
of Ktk_cube dataset ID or ``None`` (in which case auto-discovery will be used).
Returns
-------
stats: Dict[str, Dict[str, int]]
Statistics per ktk_cube dataset ID.
"""
if callable(store):
store = store()
if not isinstance(datasets, dict):
datasets = discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=store,
filter_ktk_cube_dataset_ids=datasets,
)
all_metapartitions = get_metapartitions_for_stats(datasets)
return reduce_stats([collect_stats_block(all_metapartitions, store)])
|
526405128e95e13fb6f011300ddcda922ebe8582
| 32,154 |
def post_move_subject(subject_uuid: SubjectId, target_report_uuid: ReportId, database: Database):
"""Move the subject to another report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
source = SubjectData(data_model, reports, subject_uuid)
target = ReportData(data_model, reports, target_report_uuid)
target.report["subjects"][subject_uuid] = source.subject
del source.report["subjects"][subject_uuid]
delta_description = (
f"{{user}} moved the subject '{source.subject_name}' from report "
f"'{source.report_name}' to report '{target.report_name}'."
)
source_uuids = [source.report_uuid, subject_uuid]
target_uuids = [target_report_uuid, subject_uuid]
return insert_new_report(database, delta_description, (source.report, source_uuids), (target.report, target_uuids))
|
405be11279fe3fa2a65b75ac46518cdaabcb5e90
| 32,155 |
def open_instrument(instr_type):
"""open_visa_instrument implements the public api for each of the drivers for discovering and opening a connection
:param instr_type:
The abstract base class to implement
A dictionary containing the technical specifications of the required equipment
:return:
A instantiated class connected to a valid dmm
"""
instruments = filter_connected(
fixate.config.INSTRUMENTS, fixate.config.DRIVERS.get(instr_type, {})
)
try:
instrument = list(instruments.values())[0]
except IndexError:
raise InstrumentNotConnected("No valid {} found".format(instr_type))
else:
instrument_name = type(instrument).__name__
pub.sendMessage(
"driver_open",
instr_type=instrument_name,
identity=instrument.get_identity(),
)
return instrument
|
1741b94a527a0283efee7466ccc15be09abe1622
| 32,157 |
import jinja2
import datetime
def thisyear():
"""The current year."""
return jinja2.Markup(datetime.date.today().year)
|
3de970398e1fb55f98a968c0c83411d18e8cd423
| 32,158 |
from unicodedata import east_asian_width
def display_width(str):
"""Return the required over-/underline length for str."""
try:
# Respect &ambiwidth and &tabstop, but old vim may not support this
return vim.strdisplaywidth(str)
except AttributeError:
# Fallback
result = 0
for c in str:
result += 2 if east_asian_width(c) in ('W', 'F') else 1
return result
|
ebeedd159de5c31ea435d44a88fe6fe16ccbcb54
| 32,159 |
def get_short_int(filename, ptr):
"""Jump to position 'ptr' in file and read a 16-bit integer."""
val = get_val(filename, ptr, np.int16)
return int( val )
|
42377a73df1dfbff2593fa43e571e3d269db6449
| 32,160 |
def u32_from_dto(dto: U32DTOType) -> int:
"""Convert DTO to 32-bit int."""
check_overflow(0 <= dto <= U32_MAX)
return dto
|
066ab2c2ed70d69ac8e37515ea815e1305574eea
| 32,161 |
def diagonal_basis_commutes(pauli_a, pauli_b):
"""
Test if `pauli_a` and `pauli_b` share a diagonal basis
Example:
Check if [A, B] with the constraint that A & B must share a one-qubit
diagonalizing basis. If the inputs were [sZ(0), sZ(0) * sZ(1)] then this
function would return True. If the inputs were [sX(5), sZ(4)] this
function would return True. If the inputs were [sX(0), sY(0) * sZ(2)]
this function would return False.
:param pauli_a: Pauli term to check commutation against `pauli_b`
:param pauli_b: Pauli term to check commutation against `pauli_a`
:return: Boolean of commutation result
:rtype: Bool
"""
overlapping_active_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
for qubit_index in overlapping_active_qubits:
if (pauli_a[qubit_index] != 'I' and pauli_b[qubit_index] != 'I' and
pauli_a[qubit_index] != pauli_b[qubit_index]):
return False
return True
|
b95ac0cfe22233432df3a0e0f814c4e0e7af6d0f
| 32,162 |
def cost_using_SigmoidCrossEntropyWithLogits(logits, labels):
"""
Computes the cost using the sigmoid cross entropy
Arguments:
logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
labels -- vector of labels y (1 or 0)
Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
in the TensorFlow documentation. So logits will feed into z, and labels into y.
Returns:
cost -- runs the session of the cost (formula (2))
"""
# Create the placeholders for "logits" (z) and "labels" (y)
lgt = tf.placeholder(tf.float32,name="lgt")
lbl = tf.placeholder(tf.float32,name="lbl")
# Use the loss function
# sigmoid cross-entropy with logits
loss_func = tf.nn.sigmoid_cross_entropy_with_logits(logits=lgt,labels=lbl)
# Create a session. See method 1 above.
sess = tf.Session()
# Run the session
cost = sess.run(loss_func,feed_dict={lgt:logits,lbl:labels})
# Close the session. See method 1 above.
sess.close()
return cost
|
7990ee4cb4b4ebfc7b5f1f580be2315ee6667fa5
| 32,163 |
def clone_to_df(clone):
"""Convert a clone to a pandas.DataFrame."""
number_of_mutations = clone.deltas.shape[0]
clone_stats = pd.DataFrame(
np.stack([clone.frequencies for _ in range(number_of_mutations)]),
columns=clone.frequencies.index,
index=clone.deltas.index
)
clone_stats['alt_cn'] = clone.deltas
clone_stats['clone_id'] = clone.clone_id
return clone_stats
|
e383241b024d5deef7022be3d04b36f4ffcee587
| 32,164 |
def reorder_kernel_weight(torch_weight):
""" Reorder a torch kernel weight into a tf format """
len_shape = len(torch_weight.shape)
transpose_target = list(range(len_shape))
transpose_target = transpose_target[2:] + transpose_target[:2][::-1]
return torch_weight.transpose(transpose_target)
|
2e289d768d31d3ed875fbb3613ec0e3061b65cd9
| 32,166 |
def make_windows(x, window_size, horizon):
"""
Creates overlapping windows (and their horizon labels) out of a 1D time series x.
"""
# Create a window of specific window size
window_step = np.expand_dims(np.arange(window_size+horizon), axis=0)
# Create a 2D array of multiple window steps
window_indices = window_step + np.expand_dims(np.arange(len(x)-(window_size+horizon-1)), axis=0).T
# Index on the target array (a time series) with 2D array of multiple window steps
windowed_array = x[window_indices]
windows, labels = create_window_labels(windowed_array, horizon)
return windows, labels
|
4e226e2ee2c3951cd2dfe6cb4a92b9d66e9376bf
| 32,167 |
def interpolate(arr_old, arr_new, I_old, J_old):
# deprecated 2013-08-26
"""
input: array, i, j
output: value
(int(x), int(y)+1)  +        +  (int(x)+1, int(y)+1)
                       (x,y)
(int(x), int(y))    +        +  (int(x)+1, int(y))
be careful - floor(x)=ceil(x)=x for integer x,
so we really want floor(x) and floor(x)+1
"""
I = I_old.copy()
J = J_old.copy()
arr_new2 = arr_new * 0
arr_new2 += (-999)
height_new, width_new = arr_new.shape
height_old, width_old = arr_old.shape
# set all out-of-bounds to (0,0) for convenience
I = (I>=0) * (I<height_old-1) * I #e.g. i>=0 and i<=4 for i=[0,1,2,3,4], width=5
J = (J>=0) * (J<width_old -1) * J
# the loopings are necessary since we don't know beforehand where the (i_old, j_old)
# would land
for i in range(height_new):
for j in range(width_new):
i0 = int(I[i,j])
j0 = int(J[i,j])
i1 = i0 + 1
j1 = j0 + 1
i_frac = I[i,j] % 1
j_frac = J[i,j] % 1
f00 = arr_old[i0,j0]
f01 = arr_old[i0,j1]
f10 = arr_old[i1,j0]
f11 = arr_old[i1,j1]
arr_new2[i, j] = (1-i_frac)*(1-j_frac) * f00 + \
(1-i_frac)*( j_frac) * f01 + \
( i_frac)*(1-j_frac) * f10 + \
( i_frac)*( j_frac) * f11
return arr_new2
|
bcb34c33ca462c43390ff0dd8802d05dc0512dd3
| 32,168 |
def point_seg_sep(ar, br1, br2):
"""Return the minimum separation vector between a point and a line segment,
in 3 dimensions.
Parameters
----------
ar: array-like, shape (3,)
Coordinates of a point.
br1, br2: array-like, shape (3,)
Coordinates for the points of a line segment
Returns
-------
sep: float array, shape (3,)
Separation vector between point and line segment.
"""
v = br2 - br1
w = ar - br1
c1 = np.dot(w, v)
if c1 <= 0.0:
return ar - br1
c2 = np.sum(np.square(v))
if c2 <= c1:
return ar - br2
b = c1 / c2
bc = br1 + b * v
return ar - bc
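# Usage sketch (not from the original source); assumes numpy is imported as np.
# The closest point on the segment (0,0,0)-(2,0,0) to (1,1,0) is (1,0,0), so the
# separation vector is (0,1,0).
import numpy as np
sep = point_seg_sep(np.array([1.0, 1.0, 0.0]), np.array([0.0, 0.0, 0.0]), np.array([2.0, 0.0, 0.0]))
assert np.allclose(sep, [0.0, 1.0, 0.0])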
|
a036f4ea9e9c308002e18e75111aed4408d75cf4
| 32,170 |
from typing import Callable
def shd(node_1: BinaryTreeNode,
node_2: BinaryTreeNode,
hd: Callable[[BinaryTreeNode, BinaryTreeNode], float]) -> float:
"""Structural Hamming distance (SHD)
:param node_1:
:param node_2:
:param hd:
:return:
"""
if node_1 is None or node_2 is None:
return 1
# first get arity of each node
arity_1 = 0
arity_2 = 0
if node_1.has_left_child():
arity_1 += 1
if node_1.has_right_child():
arity_1 += 1
if node_2.has_left_child():
arity_2 += 1
if node_2.has_right_child():
arity_2 += 1
if arity_1 != arity_2:
return 1
else:
if arity_1 == 0:
# both are leaves
return hd(node_1, node_2)
else:
m = arity_1
ham_dist = hd(node_1, node_2)
children_dist_sum = sum([shd(node_1.left, node_2.left, hd), shd(node_1.right, node_2.right, hd)])
return (1 / (m + 1)) * (ham_dist + children_dist_sum)
|
c6aef0189d41887fc4e63991d0176a27b0e1dd8a
| 32,172 |
import numpy
def movmeanstd(ts, m=0):
"""
Calculate the mean and standard deviation within a moving window passing across a time series.
Parameters
----------
ts: Time series to evaluate.
m: Width of the moving window.
"""
if m <= 1:
raise ValueError("Query length must be longer than one")
mInt = int(m)
zero = 0
ts = ts.astype(numpy.longdouble)
# Add zero to the beginning of the cumulative sum of ts
s = numpy.insert(numpy.cumsum(ts), zero, zero)
# Add zero to the beginning of the cumulative sum of ts ** 2
sSq = numpy.insert(numpy.cumsum(ts ** 2), zero, zero)
segSum = s[mInt:] - s[:-mInt]
segSumSq = sSq[mInt:] - sSq[:-mInt]
mov_mean = segSum / m
mov_stdP = (segSumSq / m) - ((segSum / m) ** 2)
if not numpy.all(mov_stdP == 0):
mov_std = numpy.sqrt(numpy.abs(mov_stdP))
else:
mov_std = mov_stdP
return [mov_mean, mov_std]
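# Usage sketch (not from the original source): with a window of 3 over [1..5] the
# moving means are [2, 3, 4] and every window has the same population standard
# deviation sqrt(2/3) ~= 0.816.
mean3, std3 = movmeanstd(numpy.array([1.0, 2.0, 3.0, 4.0, 5.0]), m=3)
assert numpy.allclose(mean3, [2.0, 3.0, 4.0])
assert numpy.allclose(std3, numpy.sqrt(2.0 / 3.0))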
|
8a9e56db4f26862bff972a3dbfac87f6ea5b8c35
| 32,173 |
def importing_regiondata():
"""
Loads the regiondata
Should convert the year column to proper year
Should immediately create geopandas dataframe
Returns: a dataframe
"""
regiondata = pd.read_stata("data/regiondata.dta")
return regiondata
|
132e4076e941f4451b6bb52c5d81c5895dde0154
| 32,174 |
def rotationFromQuaternion(*args):
"""rotationFromQuaternion(float pA, float pB, float pC, float pD) -> Rotation"""
return _almath.rotationFromQuaternion(*args)
|
e418bf864246ef209291d970e9cf33f0edc3fe8f
| 32,176 |
import re
def get_username(identifier):
"""Checks if a string is a email adress or not."""
pattern = re.compile('.+@\w+\..+')
if pattern.match(identifier):
try:
user = User.objects.get(email=identifier)
except:
raise Http404
else:
return user.username
else:
return identifier
|
de5eb0db99b9580cd210f733cd2e829c84593573
| 32,177 |
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
|
36c980c0d81c4a1edf09feec9aafcf1605968bb3
| 32,178 |
def get_sec (hdr,key='BIASSEC') :
"""
Returns the numpy range for a FITS section based on a FITS header entry using the standard format
{key} = '[{col1}:{col2},{row1}:row2}]'
where 1 <= col <= NAXIS1, 1 <= row <= NAXIS2.
"""
if key in hdr :
s = hdr.get(key) # WITHOUT CARD COMMENT
ny = hdr['NAXIS2']
sx = s[s.index('[')+1:s.index(',')].split(':')
sy = s[s.index(',')+1:s.index(']')].split(':')
return [ny-int(sy[1]),ny-int(sy[0])+1,int(sx[0])-1,int(sx[1])]
else :
return None
|
3927e6f5d62818079fa9475976f04dda1824e976
| 32,180 |
import numpy
def _fetch_object_array(cursor, type_tree=None):
"""
_fetch_object_array() fetches arrays with a basetype that is not considered
scalar.
"""
arrayShape = cursor_get_array_dim(cursor)
# handle a rank-0 array by converting it to
# a 1-dimensional array of size 1.
if len(arrayShape) == 0:
arrayShape.append(1)
# now create the (empty) array of the correct type and shape
array = numpy.empty(dtype=object, shape=arrayShape)
# goto the first element
cursor_goto_first_array_element(cursor)
# loop over all elements excluding the last one
flat = array.flat
arraySizeMinOne = array.size - 1
for i in range(arraySizeMinOne):
flat[i] = _fetch_subtree(cursor, type_tree)
cursor_goto_next_array_element(cursor)
# final element then back to parent scope
flat[arraySizeMinOne] = _fetch_subtree(cursor, type_tree)
cursor_goto_parent(cursor)
return array
|
b4e262ec7fc4dba943ab2f8420add12f59aed4eb
| 32,181 |
import pickle
def load_training_batch(batch_id, batch_size):
"""Load the Preprocessed Training data and return them in batches of <batch_size> or less"""
filename = 'data/cifar_pickle/' + 'batch_' + str(batch_id) + '.pkl'
features, labels = pickle.load(open(filename, mode='rb'))
return batch_features_labels(features, labels, batch_size)
|
4aa762a80dde638d71076a888613606a1ee11a48
| 32,182 |
import logging
import json
def get_record(params,record_uid):
"""Return the referenced record cache"""
record_uid = record_uid.strip()
if not record_uid:
logging.warning('No record UID provided')
return
if not params.record_cache:
logging.warning('No record cache. Sync down first.')
return
if not record_uid in params.record_cache:
logging.warning('Record UID %s not found in cache.' % record_uid)
return
cached_rec = params.record_cache[record_uid]
rec = Record()
try:
data = json.loads(cached_rec['data_unencrypted'])
rec = Record(record_uid)
extra = None
if 'extra_unencrypted' in cached_rec:
extra = json.loads(cached_rec['extra_unencrypted'])
rec.load(data, revision=cached_rec['revision'], extra=extra)
if not resolve_record_view_path(params, record_uid):
rec.mask_password()
if cached_rec.get('version') == 3:
rec.record_type = RecordV3.get_record_type_name(data)
rec.login = RecordV3.get_record_field_value(cached_rec.get('data_unencrypted'), 'login')
rec.login_url = RecordV3.get_record_field_value(cached_rec.get('data_unencrypted'), 'url')
# if 'version' in cached_rec and cached_rec['version'] in (3, 4):
# if 'data_unencrypted' in cached_rec:
# version = cached_rec.get('version') or 0
# data_unencrypted = json.loads(cached_rec['data_unencrypted'])
# if version == 3:
# rec_type = data_unencrypted.get('type') or ''
# if (rec_type and rec_type.strip()):
# rec.login = 'type: ' + rec_type.strip()
# elif version == 4:
# fname = data_unencrypted.get('name') or ''
# if (fname and fname.strip()):
# rec.login = 'file: ' + fname.strip()
except:
logging.error('**** Error decrypting record %s', record_uid)
return rec
|
7fce71c2f90387272a9c9b0a61ad4cccabf830f5
| 32,185 |
def get_probabilities(path, seq_len, model, outfile, mode):
"""
Get network-assigned probabilities
Parameters:
path (dict): Paths to the input data and label files to be loaded
seq_len (int): Length of input DNA sequence
model: Trained keras model used for prediction
outfile (str): File the batch predictions are written to
mode (str): Mode passed through to `test_on_batch`
Returns:
true labels (ndarray): True test-set labels
probas (ndarray): An array of probabilities for the test set
"""
# Inputing a range of default values here, can be changed later.
data_generator = merge_generators(path=path, batchsize=1000,
seqlen=seq_len, mode='nr')
# Load the keras model
# model = load_model(model_file)
test_on_batch(data_generator, model, outfile, mode)
probas = np.loadtxt(outfile)
true_labels = np.loadtxt(path['labels'])
return true_labels, probas
|
a75bc11704538d082ecf91a61765f4412ec2c75d
| 32,186 |
from azure.mgmt.sql import SqlManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
def get_sql_management_client(_):
"""
Gets the SQL management client
"""
return get_mgmt_service_client(SqlManagementClient)
|
6f67408fdecbe9b1a70ffbc34a4871c796e0f9f6
| 32,188 |
def string_to_gast(node):
"""
handles primitive string base case
example: "hello"
exampleIn: Str(s='hello')
exampleOut: {'type': 'str', 'value': 'hello'}
"""
return {"type": "str", "value": node.s}
|
a3dcd89e893c6edd4a9ba6095cd107bb48cc9782
| 32,189 |
def ed25519_generate_key_pair_from_secret(secret):
"""
Generate a new key pair.
Args:
secret (:class:`string`): A secret that serves as a seed
Returns:
A tuple of (private_key, public_key) encoded in base58.
"""
# if you want to do this correctly, use a key derivation function!
if not isinstance(secret, bytes):
secret = secret.encode()
hash_bytes = sha3.keccak_256(secret).digest()
sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes)
# Private key
private_value_base58 = sk.encode(encoding='base58')
# Public key
public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58')
return private_value_base58, public_value_compressed_base58
|
25b8c18289c4cf8f09a7ba937fc8f9645406e9f2
| 32,190 |
from typing import List
import math
def align_tiles_naive(request: AlignNaiveRequest,
tiles: List[TileModelDB]) -> List[AlignedTiledModel]:
"""
Performs a naive alignment of the tiles based only on the given rows and method.
Does not perform any advanced stitching or pixel checking.
Does not use the row and column index; it just iterates over the tiles in the order they are received.
Meant to be called in a separate thread because it is CPU bound.
"""
if len(tiles) == 0:
return []
# assumes they are all the same size
width_px = tiles[0].width_px
height_px = tiles[0].height_px
columns = math.ceil(len(tiles) / request.rows)
row = 0
col = 0
aligned_tiles: List[AlignedTiledModel] = []
for index, tile in enumerate(tiles):
if request.method == "byRow":
col = index % columns
else:
row = index % request.rows
tile = tile.dict()
tile["offset_x"] = col * width_px
tile["offset_y"] = row * height_px
aligned_tiles.append(AlignedTiledModel.parse_obj(tile))
if request.method == "byRow":
if col == columns - 1:
row = row + 1
else:
if row == request.rows - 1:
col = col + 1
return aligned_tiles
|
b279273d800a6884ad95f43f0a6a6f3be1ac3243
| 32,191 |
def estimate_operating_empty_mass(mtom, fuse_length, fuse_width, wing_area,
wing_span, TURBOPROP):
""" The function estimates the operating empty mass (OEM)
Source: Raymer, D.P. "Aircraft design: a conceptual approach"
AIAA educational Series, Fourth edition (2006).
Args:
mtom (float): Maximum take off mass [kg]
fuse_length (float): Fuselage length [m]
fuse_width (float): Fuselage width [m]
wing_area (float): Wing area [m^2]
wing_span (float): Wing span [m]
TURBOPROP (bool): True if the engines are turboprops, False otherwise.
Returns:
oem (float): Operating empty mass [kg]
"""
G = 9.81 # [m/s^2] Acceleration of gravity.
KC = 1.04 # [-] Wing with variable sweep (1.0 otherwise).
if TURBOPROP:
C = -0.05 # [-] General aviation twin turboprop
if fuse_length < 15.00:
A = 0.96
elif fuse_length < 30.00:
A = 1.07
else:
A = 1.0
else:
C = -0.08 # [-] General aviation twin engines
if fuse_length < 30.00:
A = 1.45
elif fuse_length < 35.00:
A = 1.63
elif fuse_length < 60.00:
if wing_span > 61:
A = 1.63
else:
A = 1.57
else:
A = 1.63
oem = round((A * KC * (mtom*G)**(C)) * mtom,3)
return oem
|
5b9bed8cef76f3c10fed911087727f0164cffab2
| 32,192 |
def var_gaussian(r, level=5, modified=False):
"""
Returns the Parametric Gaussian VaR of a Series or DataFrame
"""
# compute the Z score assuming it was Gaussian
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1) * s/6 +
(z**3 - 3*z) * (k-3)/24 -
(2*z**3 - 5*z) * (s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
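# Usage sketch (not from the original source): `norm` here is scipy.stats.norm,
# which the function expects to find at module level; with modified=False the
# module's skewness/kurtosis helpers are not needed. Return values are illustrative.
from scipy.stats import norm
import pandas as pd
r = pd.Series([0.01, -0.02, 0.005, 0.03, -0.01])
var_95 = var_gaussian(r, level=5)  # estimated 5% VaR, expressed as a loss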
|
18d3b1ee2228fafaaf977b216245c8217e77396b
| 32,193 |
def grid_search_serial(data, greens, misfit, grid):
"""
Grid search over moment tensors. For each moment tensor in grid, generates
synthetics and evaluates data misfit
"""
results = np.zeros(grid.size)
count = 0
for mt in grid:
print(grid.index)
for key in data:
results[count] += misfit[key](data[key], greens[key], mt)
count += 1
return results
|
fa0a2c19cfbfa685d59f3effea7b3f7478999f88
| 32,194 |
def getSqTransMoment(system):
"""//Input SYSTEM is a string with both the molecular species AND the band "system"
// Electronic transition moment, Re, needed for "Line strength", S = |R_e|^2*q_v'v" or just |R_e|^2
// //Allen's Astrophysical quantities, 4.12.2 - 4.13.1
// // ROtational & vibrational constants for TiO states:, p. 87, Table 4.17"""
#// Square electronic transition moment, |Re|^2,
#// needed for "Line strength", S = |R_e|^2*q_v'v" or just |R_e|^2
#// // //Allen's Astrophysical quantities, 4.12.2 - 4.13.1
#// As of Feb 2017 - try the band-head value R_00^2 from last column of table:
RSqu = 0.0 #//default initialization
#TiO alpha system
if ("TiO_C3Delta_X3Delta" == system):
RSqu = 0.84
#TiO beta system
if ("TiO_c1Phi_a1Delta" == system):
RSqu = 4.63
#TiO gamma system
if ("TiO_A3Phi_X3Delta" == system):
RSqu = 5.24
#CH A^2Delta_X^2Pi system - "G band" at 4300 A
if ("CH_A2Delta_X2Pi" == system):
RSqu = 0.081 #mean of two values given
#//
return RSqu
|
19c5311f7d8fde4bb834d809fd2f6ed7dd2c036e
| 32,195 |
def volumes(assets,
start,
end,
frequency='daily',
symbol_reference_date=None,
start_offset=0,
use_amount=False):
"""
Fetch the trading volume (or turnover) of assets over a period.
Parameters
----------
assets (int/str/Asset or iterable of same)
Identifiers for assets to load. Integers are interpreted as sids.
Strings are interpreted as symbols.
start (str or pd.Timestamp)
Start date of data to load.
end (str or pd.Timestamp)
End date of data to load.
frequency ({'minute', 'daily'}, optional)
Frequency at which to load data. Default is ‘daily’.
symbol_reference_date (pd.Timestamp, optional)
Date as of which to resolve strings as tickers. Default is the current day.
start_offset (int, optional)
Number of periods before start to fetch. Default is 0.
This is most often useful for calculating returns.
use_amount:bool
Whether to use the turnover (amount) field instead of volume. Defaults to False.
If True, the turnover data for the period is read.
Returns:
volumes (pd.Series or pd.DataFrame)
Pandas object containing volumes for the requested asset(s) and dates.
Data is returned as a pd.Series if a single asset is passed.
Data is returned as a pd.DataFrame if multiple assets are passed.
"""
field = 'amount' if use_amount else 'volume'
return prices(assets, start, end, frequency, field, symbol_reference_date,
start_offset)
|
e2e0a7d6bd8b659e070299d00699d8cae6ed3c9f
| 32,196 |
def qipy_action(cd_to_tmpdir):
""" QiPy Action """
return QiPyAction()
|
7c6d828c4baf29d2f457f02b0b54e6c967d96cb3
| 32,198 |
def vectorized_range(start, end):
""" Return an array of NxD, iterating from the start to the end"""
N = int(np.max(end - start)) + 1
idxes = np.floor(np.arange(N) * (end - start)[:, None] / N + start[:, None]).astype('int')
return idxes
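# Usage sketch (not from the original source); assumes numpy is imported as np.
# Each row i walks from start[i] towards end[i] in N = max(end - start) + 1 steps.
import numpy as np
idx = vectorized_range(np.array([0, 2]), np.array([3, 5]))
assert (idx == np.array([[0, 0, 1, 2], [2, 2, 3, 4]])).all()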
|
cef2304639dbac3c1a1dfbd9ae928f813bd65b05
| 32,200 |
import numpy as np
from numpy import random
def stratified(W, M):
"""Stratified resampling.
"""
su = (random.rand(M) + np.arange(M)) / M
return inverse_cdf(su, W)
|
4f1ceb6840240178df312fee266fe612abb3193f
| 32,201 |
def is_configured():
"""Return if Azure account is configured."""
return False
|
5662656b513330e0a05fa25decc03c04b5f367fa
| 32,202 |
def box_strings(*strings: str, width: int = 80) -> str:
"""Centre-align and visually box some strings.
Args:
*strings (str): Strings to box. Each string will be printed on its own
line. You need to ensure the strings are short enough to fit in the
box (width-6) or the results will not be as intended.
width (int, optional): Width of the box. Defaults to 80.
Returns:
str: The strings, centred and surrounded by a border box.
"""
lines = ["+" + "-"*(width-2) + "+", "|" + " "*(width-2) + "|"]
lines.extend(f'| {string.center(width-6)} |' for string in strings)
lines.extend(lines[:2][::-1])
return "\n".join(lines)
|
b47aaf020cf121b54d2b588bdec3067a3b83fd27
| 32,203 |
import traceback
def exceptions(e):
"""This exceptions handler manages Flask/Werkzeug exceptions.
For Renku exception handlers check ``service/decorators.py``
"""
# NOTE: Capture werkzeug exceptions and propagate them to sentry.
capture_exception(e)
# NOTE: Capture traceback for dumping it to the log.
tb = traceback.format_exc()
if hasattr(e, "code") and e.code == 404:
service_log.error(
"{} {} {} {} 404 NOT FOUND\n{}".format(
request.remote_addr, request.method, request.scheme, request.full_path, tb
)
)
return error_response(HTTP_SERVER_ERROR - e.code, e.name)
if hasattr(e, "code") and e.code >= 500:
service_log.error(
"{} {} {} {} 5xx INTERNAL SERVER ERROR\n{}".format(
request.remote_addr, request.method, request.scheme, request.full_path, tb
)
)
return error_response(HTTP_SERVER_ERROR - e.code, e.name)
# NOTE: Werkzeug exceptions should be covered above, following line is for unexpected HTTP server errors.
return error_response(HTTP_SERVER_ERROR, str(e))
|
574c97b301f54785ae30dbfc3cc5176d5352cb82
| 32,204 |
import torch
import torch.nn.functional as F
def top_k_top_p_filtering(logits, top_k, top_p, filter_value=-float("Inf")):
"""
top_k or top_p (nucleus) decoding strategy: keep only the top_k tokens, or the tokens whose cumulative probability reaches top_p, and set all other tokens to filter_value so they are effectively impossible to sample later.
Args:
logits: prediction scores, i.e. the score of each word in the vocabulary
top_k: keep only the top_k highest-probability tokens
top_p: keep only the tokens whose cumulative probability reaches top_p
filter_value: value assigned to filtered-out tokens
Returns:
"""
# logits must be 2-dimensional, i.e. of size [batch_size, vocab_size]
assert logits.dim() == 2
# Take the smaller of top_k and the vocabulary size; if top_k exceeds the vocabulary size, keep the whole vocabulary
top_k = min(top_k, logits[0].size(-1))
# If top_k is non-zero, keep only top_k tokens in logits
if top_k > 0:
# There are batch_size predictions, so iterate over them and keep the top_k tokens of each
for logit in logits:
indices_to_remove = logit < torch.topk(logit, top_k)[0][..., -1, None]
logit[indices_to_remove] = filter_value
# If top_p is non-zero, keep only the tokens whose cumulative probability reaches top_p
if top_p > 0.0:
# Sort logits in descending order
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
# Normalize the sorted logits with softmax, then take the cumulative probabilities
# e.g. the sequence [0.1, 0.2, 0.3, 0.4] becomes [0.1, 0.3, 0.6, 1.0]
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens whose cumulative probability exceeds top_p
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices one position to the right so the first token above top_p is also kept
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for index, logit in enumerate(logits):
# There are batch_size predictions, so iterate over them and select the tokens within the top_p cumulative probability of each
indices_to_remove = sorted_indices[index][sorted_indices_to_remove[index]]
logit[indices_to_remove] = filter_value
return logits
|
74cf4a6cf4622ad1c9b124089cd84ddb07bdb7be
| 32,205 |
def get_alb(alb_name, aws_auth_cred):
"""
Find and return the load balancer with the given name.
Args:
alb_name (str): Load balancer name
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
alb (dict): Loadbalancer details
"""
client = get_elbv2_client(aws_auth_cred)
try:
response = client.describe_load_balancers(Names=[alb_name])
albs = response['LoadBalancers']
return albs.pop() if len(albs) else None
except:
return None
|
a31ae3067d96008622b43c57ffd1b0de74eceaa0
| 32,206 |
def align_left_position(anchor, size, alignment, margin):
"""Find the position of a rectangle to the left of a given anchor.
:param anchor: A :py:class:`~skald.geometry.Rectangle` to anchor the
rectangle to.
:param size: The :py:class:`~skald.geometry.Size` of the rectangle.
:param alignment: The :py:class:`~skald.definitions.Alignment` of the
rectangle.
:param margin: The margin, in pixels, the rectangle must have from the
anchor.
"""
x = anchor.left - size.width - margin
y = vertical_align(anchor, size, alignment)
return Point(x=x, y=y)
|
2af1c6175960313958cc51d0180ebc4f6ed9dc41
| 32,207 |
def quickdraw_to_linestring(qd_image):
"""Returns a Shapely MultiLineString for the provided quickdraw image.
This MultiLineString can be passed to vsketch
"""
linestrings = []
for i in range(0, len(qd_image["image"])):
line = zip(qd_image["image"][i][0], qd_image["image"][i][1])
linestrings.append(tuple(line))
return MultiLineString(linestrings)
|
39957b9a36a59b33a2fb5abc91f7479c946515a2
| 32,208 |
import functools
def build(image_resizer_config):
"""Builds callable for image resizing operations.
Args:
image_resizer_config: image_resizer.proto object containing parameters for
an image resizing operation.
Returns:
image_resizer_fn: Callable for image resizing. This callable always takes
a rank-3 image tensor (corresponding to a single image) and returns a
rank-3 image tensor, possibly with new spatial dimensions.
Raises:
ValueError: if `image_resizer_config` is of incorrect type.
ValueError: if `image_resizer_config.image_resizer_oneof` is not of an
expected type.
ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
is used.
"""
if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
raise ValueError('image_resizer_config not of type '
'image_resizer_pb2.ImageResizer.')
if image_resizer_config.WhichOneof(
'image_resizer_oneof') == 'keep_aspect_ratio_resizer':
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if not (keep_aspect_ratio_config.min_dimension
<= keep_aspect_ratio_config.max_dimension):
raise ValueError('min_dimension > max_dimension')
return functools.partial(
preprocessor.resize_to_range,
min_dimension=keep_aspect_ratio_config.min_dimension,
max_dimension=keep_aspect_ratio_config.max_dimension)
if image_resizer_config.WhichOneof(
'image_resizer_oneof') == 'fixed_shape_resizer':
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
return functools.partial(preprocessor.resize_image,
new_height=fixed_shape_resizer_config.height,
new_width=fixed_shape_resizer_config.width)
raise ValueError('Invalid image resizer option.')
|
75df1c37397e88322113aa8822d60053ae54981d
| 32,210 |
from typing import Optional
from typing import Tuple
def plotly_protein_structure_graph(
G: nx.Graph,
plot_title: Optional[str] = None,
figsize: Tuple[int, int] = (620, 650),
node_alpha: float = 0.7,
node_size_min: float = 20.0,
node_size_multiplier: float = 20.0,
label_node_ids: bool = True,
node_colour_map=plt.cm.plasma,
edge_color_map=plt.cm.plasma,
colour_nodes_by: str = "degree",
colour_edges_by: str = "kind",
) -> go.Figure:
"""
Plots protein structure graph using plotly.
:param G: nx.Graph Protein Structure graph to plot
:type G: nx.Graph
:param plot_title: Title of plot, defaults to None
:type plot_title: str, optional
:param figsize: Size of figure, defaults to (620, 650)
:type figsize: Tuple[int, int]
:param node_alpha: Controls node transparency, defaults to 0.7
:type node_alpha: float
:param node_size_min: Specifies node minimum size
:type node_size_min: float
:param node_size_multiplier: Scales node size by a constant. Node sizes reflect degree.
:type node_size_multiplier: float
:param label_node_ids: bool indicating whether or not to plot node_id labels
:type label_node_ids: bool
:param node_colour_map: colour map to use for nodes
:type node_colour_map: plt.cm
:param edge_color_map: colour map to use for edges
:type edge_color_map: plt.cm
:param colour_nodes_by: Specifies how to colour nodes. "degree", "seq_position" or a node feature
:type colour_nodes_by: str
:param colour_edges_by: Specifies how to colour edges. Currently only "kind" is supported
:type colour_edges_by: str
:returns: Plotly Graph Objects plot
:rtype: go.Figure
"""
# Get Node Attributes
pos = nx.get_node_attributes(G, "coords")
# Get node colours
node_colors = colour_nodes(
G, colour_map=node_colour_map, colour_by=colour_nodes_by
)
edge_colors = colour_edges(
G, colour_map=edge_color_map, colour_by=colour_edges_by
)
# 3D network plot
x_nodes = []
y_nodes = []
z_nodes = []
node_sizes = []
node_labels = []
# Loop on the pos dictionary to extract the x,y,z coordinates of each node
for i, (key, value) in enumerate(pos.items()):
x_nodes.append(value[0])
y_nodes.append(value[1])
z_nodes.append(value[2])
node_sizes.append(node_size_min + node_size_multiplier * G.degree[key])
if label_node_ids:
node_labels.append(list(G.nodes())[i])
nodes = go.Scatter3d(
x=x_nodes,
y=y_nodes,
z=z_nodes,
mode="markers",
marker={
"symbol": "circle",
"color": node_colors,
"size": node_sizes,
"opacity": node_alpha,
},
text=list(G.nodes()),
hoverinfo="text+x+y+z",
)
# Loop on the list of edges to get the x,y,z, coordinates of the connected nodes
# Those two points are the extrema of the line to be plotted
x_edges = []
y_edges = []
z_edges = []
for node_a, node_b in G.edges(data=False):
x_edges.extend([pos[node_a][0], pos[node_b][0], None])
y_edges.extend([pos[node_a][1], pos[node_b][1], None])
z_edges.extend([pos[node_a][2], pos[node_b][2], None])
axis = dict(
showbackground=False,
showline=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title="",
)
edges = go.Scatter3d(
x=x_edges,
y=y_edges,
z=z_edges,
mode="lines",
line={"color": edge_colors, "width": 10},
text=[
" / ".join(list(edge_type))
for edge_type in nx.get_edge_attributes(G, "kind").values()
],
hoverinfo="text",
)
fig = go.Figure(
data=[nodes, edges],
layout=go.Layout(
title=plot_title,
width=figsize[0],
height=figsize[1],
showlegend=False,
scene=dict(
xaxis=dict(axis),
yaxis=dict(axis),
zaxis=dict(axis),
),
margin=dict(t=100),
),
)
return fig
|
4aae1ce763daa06627fe43e31780fa61cd1886a4
| 32,211 |
def mag_scale_rel_to_hazardlib(mag_scale_rel, use_default=False):
"""
Returns the magnitude scaling relation in a format readable by
openquake.hazardlib
"""
if isinstance(mag_scale_rel, BaseMSR):
return mag_scale_rel
elif isinstance(mag_scale_rel, str):
if not mag_scale_rel in SCALE_RELS.keys():
raise ValueError('Magnitude scaling relation %s not supported!'
% mag_scale_rel)
else:
return SCALE_RELS[mag_scale_rel]()
else:
if use_default:
# Returns the Wells and Coppersmith string
return WC1994()
else:
raise ValueError('Magnitude Scaling Relation Not Defined!')
|
7db46083d4c05e3f53b4a5d064c923937bb5fe2a
| 32,213 |
import regex
import tokenize
def __get_words(text, by_spaces):
"""
Helper function which splits the given text string into words. If by_spaces is false, then text like
'01-02-2014' will be split into 3 separate words. For backwards compatibility, this is the default for all
expression functions.
:param text: the text to split
:param by_spaces: whether words should be split only by spaces or by punctuation like '-', '.' etc
"""
if by_spaces:
splits = regex.split(r'\s+', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0)
return [split for split in splits if split] # return only non-empty
else:
return tokenize(text)
|
289d7cc58d165355a4e5a25db016dbe2e6aa74ec
| 32,215 |
from typing import Any
def gera_paragrafo(data: pd.DataFrame) -> pd.DataFrame:
"""docstring for gera_paragrafo"""
data[["div_sup", "par"]] = data.location.str.split(".", n=1, expand=True)
data.dropna(inplace=True)
j: Any = data.groupby(["author", "text", "file", "div_sup", "par", "genero"]).agg(
{"lemma": lambda x: " ".join(x)}
)
i: Any = j.reset_index()
return i.loc[:, :]
|
04285d5df307e87b8adc389cf2f03d9ef9b44276
| 32,216 |
def _parse_boolean(xml_boolean):
"""Converts strings "true" and "false" from XML files to Python bool"""
if xml_boolean is not None:
assert xml_boolean in ["true", "false"], \
"The boolean string must be \"true\" or \"false\""
return {"true": True, "false": False}[xml_boolean]
|
6d9d1b617f8935d1684bd24bbea06d00ca2a5b4a
| 32,217 |
def to_heterogeneous(G, ntypes, etypes, ntype_field=NTYPE,
etype_field=ETYPE, metagraph=None):
"""Convert a homogeneous graph to a heterogeneous graph and return.
The input graph should have only one type of nodes and edges. Each node and edge
stores an integer feature as its type ID
(specified by :attr:`ntype_field` and :attr:`etype_field`).
DGL uses it to retrieve the type names stored in the given
:attr:`ntypes` and :attr:`etypes` arguments.
The function will automatically distinguish edge types that have the same given
type IDs but different src and dst type IDs. For example, it allows both edges A and B
to have the same type ID 0, but one has (0, 1) and the other as (2, 3) as the
(src, dst) type IDs. In this case, the function will "split" edge type 0 into two types:
(0, ty_A, 1) and (2, ty_B, 3). In other words, these two edges share the same edge
type name, but can be distinguished by an edge type triplet.
The function stores the node and edge IDs in the input graph using the ``dgl.NID``
and ``dgl.EID`` names in the ``ndata`` and ``edata`` of the resulting graph.
It also copies any node/edge features from :attr:`G` to the returned heterogeneous
graph, except for reserved fields for storing type IDs (``dgl.NTYPE`` and ``dgl.ETYPE``)
and node/edge IDs (``dgl.NID`` and ``dgl.EID``).
Parameters
----------
G : DGLGraph
The homogeneous graph.
ntypes : list[str]
The node type names.
etypes : list[str]
The edge type names.
ntype_field : str, optional
The feature field used to store node type. (Default: ``dgl.NTYPE``)
etype_field : str, optional
The feature field used to store edge type. (Default: ``dgl.ETYPE``)
metagraph : networkx MultiDiGraph, optional
Metagraph of the returned heterograph.
If provided, DGL assumes that G can indeed be described with the given metagraph.
If None, DGL will infer the metagraph from the given inputs, which could be
costly for large graphs.
Returns
-------
DGLGraph
A heterogeneous graph.
Notes
-----
The returned node and edge types may not necessarily be in the same order as
``ntypes`` and ``etypes``.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import torch
>>> hg = dgl.heterograph({
... ('user', 'develops', 'activity'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
... ('developer', 'develops', 'game'): (torch.tensor([0, 1]), torch.tensor([0, 1]))
... })
>>> print(hg)
Graph(num_nodes={'activity': 3, 'developer': 2, 'game': 2, 'user': 2},
num_edges={('developer', 'develops', 'game'): 2, ('user', 'develops', 'activity'): 2},
metagraph=[('developer', 'game', 'develops'), ('user', 'activity', 'develops')])
We first convert the heterogeneous graph to a homogeneous graph.
>>> g = dgl.to_homogeneous(hg)
>>> print(g)
Graph(num_nodes=9, num_edges=4,
ndata_schemes={'_TYPE': Scheme(shape=(), dtype=torch.int64),
'_ID': Scheme(shape=(), dtype=torch.int64)}
edata_schemes={'_TYPE': Scheme(shape=(), dtype=torch.int64),
'_ID': Scheme(shape=(), dtype=torch.int64)})
>>> g.ndata
{'_TYPE': tensor([0, 0, 0, 1, 1, 2, 2, 3, 3]), '_ID': tensor([0, 1, 2, 0, 1, 0, 1, 0, 1])}
Nodes 0, 1, 2 for 'activity', 3, 4 for 'developer', 5, 6 for 'game', 7, 8 for 'user'
>>> g.edata
{'_TYPE': tensor([0, 0, 1, 1]), '_ID': tensor([0, 1, 0, 1])}
Edges 0, 1 for ('developer', 'develops', 'game'), 2, 3 for ('user', 'develops', 'activity')
Now convert the homogeneous graph back to a heterogeneous graph.
>>> hg_2 = dgl.to_heterogeneous(g, hg.ntypes, hg.etypes)
>>> print(hg_2)
Graph(num_nodes={'activity': 3, 'developer': 2, 'game': 2, 'user': 2},
num_edges={('developer', 'develops', 'game'): 2, ('user', 'develops', 'activity'): 2},
metagraph=[('developer', 'game', 'develops'), ('user', 'activity', 'develops')])
Retrieve the original node/edge IDs.
>>> hg_2.ndata[dgl.NID]
{'activity': tensor([0, 1, 2]),
'developer': tensor([3, 4]),
'game': tensor([5, 6]),
'user': tensor([7, 8])}
>>> hg_2.edata[dgl.EID]
{('developer', 'develops', 'game'): tensor([0, 1]),
('user', 'develops', 'activity'): tensor([2, 3])}
See Also
--------
to_homogeneous
"""
if (hasattr(G, 'ntypes') and len(G.ntypes) > 1
or hasattr(G, 'etypes') and len(G.etypes) > 1):
raise DGLError('The input graph should be homogeneous and have only one '
' type of nodes and edges.')
num_ntypes = len(ntypes)
idtype = G.idtype
device = G.device
ntype_ids = F.asnumpy(G.ndata[ntype_field])
etype_ids = F.asnumpy(G.edata[etype_field])
# relabel nodes to per-type local IDs
ntype_count = np.bincount(ntype_ids, minlength=num_ntypes)
ntype_offset = np.insert(np.cumsum(ntype_count), 0, 0)
ntype_ids_sortidx = np.argsort(ntype_ids)
ntype_local_ids = np.zeros_like(ntype_ids)
node_groups = []
for i in range(num_ntypes):
node_group = ntype_ids_sortidx[ntype_offset[i]:ntype_offset[i+1]]
node_groups.append(node_group)
ntype_local_ids[node_group] = np.arange(ntype_count[i])
src, dst = G.all_edges(order='eid')
src = F.asnumpy(src)
dst = F.asnumpy(dst)
src_local = ntype_local_ids[src]
dst_local = ntype_local_ids[dst]
# a 2D tensor of shape (E, 3). Each row represents the (stid, etid, dtid) tuple.
edge_ctids = np.stack([ntype_ids[src], etype_ids, ntype_ids[dst]], 1)
# infer metagraph and canonical edge types
# No matter which branch it takes, the code will generate a 2D tensor of shape (E_m, 3),
# E_m is the set of all possible canonical edge tuples. Each row represents the
# (stid, etid, dtid) tuple. We then compute a 2D tensor of shape (E, E_m) using the
# above ``edge_ctids`` matrix. Each element i,j indicates whether the edge i is of the
# canonical edge type j. We can then group the edges of the same type together.
if metagraph is None:
canonical_etids, _, etype_remapped = \
utils.make_invmap(list(tuple(_) for _ in edge_ctids), False)
etype_mask = (etype_remapped[None, :] == np.arange(len(canonical_etids))[:, None])
else:
ntypes_invmap = {nt: i for i, nt in enumerate(ntypes)}
etypes_invmap = {et: i for i, et in enumerate(etypes)}
canonical_etids = []
for i, (srctype, dsttype, etype) in enumerate(metagraph.edges(keys=True)):
srctype_id = ntypes_invmap[srctype]
etype_id = etypes_invmap[etype]
dsttype_id = ntypes_invmap[dsttype]
canonical_etids.append((srctype_id, etype_id, dsttype_id))
canonical_etids = np.asarray(canonical_etids)
etype_mask = (edge_ctids[None, :] == canonical_etids[:, None]).all(2)
edge_groups = [etype_mask[i].nonzero()[0] for i in range(len(canonical_etids))]
data_dict = dict()
canonical_etypes = []
for i, (stid, etid, dtid) in enumerate(canonical_etids):
src_of_etype = src_local[edge_groups[i]]
dst_of_etype = dst_local[edge_groups[i]]
canonical_etypes.append((ntypes[stid], etypes[etid], ntypes[dtid]))
data_dict[canonical_etypes[-1]] = \
(src_of_etype, dst_of_etype)
hg = heterograph(data_dict,
{ntype: count for ntype, count in zip(ntypes, ntype_count)},
idtype=idtype, device=device)
ntype2ngrp = {ntype : node_groups[ntid] for ntid, ntype in enumerate(ntypes)}
# features
for key, data in G.ndata.items():
if key in [ntype_field, NID]:
continue
for ntid, ntype in enumerate(hg.ntypes):
rows = F.copy_to(F.tensor(ntype2ngrp[ntype]), F.context(data))
hg._node_frames[ntid][key] = F.gather_row(data, rows)
for key, data in G.edata.items():
if key in [etype_field, EID]:
continue
for etid in range(len(hg.canonical_etypes)):
rows = F.copy_to(F.tensor(edge_groups[etid]), F.context(data))
hg._edge_frames[hg.get_etype_id(canonical_etypes[etid])][key] = \
F.gather_row(data, rows)
# Record the original IDs of the nodes/edges
for ntid, ntype in enumerate(hg.ntypes):
hg._node_frames[ntid][NID] = F.copy_to(F.tensor(ntype2ngrp[ntype]), device)
for etid in range(len(hg.canonical_etypes)):
hg._edge_frames[hg.get_etype_id(canonical_etypes[etid])][EID] = \
F.copy_to(F.tensor(edge_groups[etid]), device)
return hg
|
f1792d78e4b94c5f3d4f72ef6cfcbcb14c7d1158
| 32,218 |
def Solution(image):
"""
input: same size (256*256) rgb image
output: the label of the image
"l" -> left
"m" -> middle
"r" -> right
"o" -> other(NO target)
if no target detected, return "o", which is the initial value
"""
# initial two points used to locate the target area
topLeft = [0,0]
bottomRight = [0,0]
pred_label = "o" #initial the recognition label
#make image to gray
thresh = 200
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
image = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
#find contours in grey image
C,h= cv2.findContours(image, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if len(C) > 0:
for i in range(len(C)):
c = C[i]
area = cv2.contourArea(c)
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
# convert all coordinates floating point values to int
box = np.int0(box)
#limit the area we interested
if area>500:
topLeft = [box[1][0],box[1][1]]
bottomRight = [box[3][0],box[3][1]]
#cut the traffic sign with slight remnants around
cut = image[topLeft[1]:bottomRight[1],topLeft[0]:bottomRight[0]]
Ishape = cut.shape
#limit the area we interested again
if Ishape[0] <30 or Ishape[1] < 40:
continue
else:
#use two different template to match
#each return two position with is the topleft and the bottomright of the processed image
#t1.jpg is the x-like character
topleft_1,bottomright_1 = Matcher(cut,"./template/t2.jpg")
topleft_2,bottomright_2= Matcher(cut,"./template/t1.jpg")
#if not none
if topleft_1 and topleft_2 and bottomright_1 and bottomright_2:
pred_label = helper(topleft_1,bottomright_1,topleft_2,bottomright_2,Ishape=Ishape)
return pred_label
|
11fb49c96cb7cbfdfb522d6794f148cd6354dcf9
| 32,219 |
def index_to_tag(v, index_tag):
"""
:param v: vector
:param index_tag:
:return:
"""
idx = np.nonzero(v)
tags = [index_tag[i] for i in idx[0]]
return ' '.join(tags)
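# Usage sketch (not from the original source); assumes numpy is imported as np
# (the function body uses np.nonzero) and that index_tag maps positions to tags.
import numpy as np
assert index_to_tag(np.array([1, 0, 2]), ["PER", "LOC", "ORG"]) == "PER ORG"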
|
ebf30632bbf8a7b399461b191c33f345f04c4cc2
| 32,220 |
def first_phrase_span(utterance, phrases):
"""Returns the span (start, end+1) of the first phrase from the given list
that is found in the utterance. Returns (-1, -1) if no phrase is found.
:param utterance: The utterance to search in
:param phrases: a list of phrases to be tried (in the given order)
:rtype: tuple
"""
for phrase in phrases:
pos = phrase_pos(utterance, phrase)
if pos != -1:
return pos, pos + len(phrase)
return -1, -1
|
f3be7bd976c60467bcf51edfb15d3736e00568a8
| 32,222 |
import datetime
def parse_date(value):
"""Parse a string and return a datetime.date.
Raise ValueError if the input is well formatted but not a valid date.
Return None if the input isn't well formatted.
"""
match = date_re.match(value)
if match:
kw = {k: int(v) for k, v in list(match.groupdict().items())}
return datetime.date(**kw)
|
b32cc64bab460e1384492b7cb694b8263431625f
| 32,223 |
import scipy
def construct_Dfunc(delays, plot=False):
"""Return interpolation functions fD(t) and fdD(t).
fD(t) is the delay between infection and reporting at reporting time t.
fdD(t) is its derivative.
Parameter:
- delays: tuples (time_report, delay_days)
- plot: whether to generate a plot.
Return:
- fD: interpolation function for D(t) with t in nanoseconds.
- fdD: interpolation function for dD/dt.
(taking time in ns but returning dD per day.)
- delay_str: delay string e.g. '7' or '7-9'
"""
ts0 = [float(pd.to_datetime(x[0]).to_datetime64()) for x in delays]
Ds0 = [float(x[1]) for x in delays]
if len(delays) == 1:
# prevent interp1d complaining.
ts0 = [ts0[0], ts0[0]+1e9]
Ds0 = np.concatenate([Ds0, Ds0])
# delay function as linear interpolation;
# nanosecond timestamps as t value.
fD0 = scipy.interpolate.interp1d(
ts0, Ds0, kind='linear', bounds_error=False,
fill_value=(Ds0[0], Ds0[-1])
)
# construct derivative dD/dt, smoothen out
day = 1e9*86400 # one day in nanoseconds
ts = np.arange(ts0[0]-3*day, ts0[-1]+3.01*day, day)
dDs = (fD0(ts+3*day) - fD0(ts-3*day))/6
fdD = scipy.interpolate.interp1d(
ts, dDs, 'linear', bounds_error=False,
fill_value=(dDs[0], dDs[-1]))
# reconstruct D(t) to be consistent with the smoothened derivative.
Ds = scipy.integrate.cumtrapz(dDs, ts/day, initial=0) + Ds0[0]
fD = scipy.interpolate.interp1d(
ts, Ds, 'linear', bounds_error=False,
fill_value=(Ds[0], Ds[-1]))
Dmin, Dmax = np.min(Ds0), np.max(Ds0)
if Dmin == Dmax:
delay_str = f'{Dmin:.0f}'
else:
delay_str = f'{Dmin:.0f}-{Dmax:.0f}'
if plot:
fig, ax = plt.subplots(1, 1, figsize=(7, 3), tight_layout=True)
tsx = np.linspace(
ts[0],
int(pd.to_datetime('now').to_datetime64())
)
ax.plot(pd.to_datetime(tsx.astype(int)), fD(tsx))
ax.set_ylabel('Vertraging (dagen)')
tools.set_xaxis_dateformat(ax, 'Rapportagedatum')
fig.canvas.set_window_title('Vertraging infectiedatum - rapportage')
fig.show()
return fD, fdD, delay_str
|
ee6acbc265d8020815ac2e9cd77fe74a6ff9d5f7
| 32,224 |
def deimmunization_rate_80():
"""
Real Name: b'deimmunization rate 80'
Original Eqn: b'Recovered 80/immunity time 80'
Units: b'person/Day'
Limits: (None, None)
Type: component
b''
"""
return recovered_80() / immunity_time_80()
|
9221343889ba05d93671102e72ef70a5efd40a5a
| 32,225 |
def connect_to_lightsail():
"""
Uses Paramiko to create a connection to Brendan's instance. Relies on authentication information from a JSON file.
:return SFTP_Client:
"""
return open_sftp_from_json(JSON_PRIVATE_DIR / 'lightsail_server_info.json')
|
fb0f74fe58e5a99ca93737415b931018be4d67d7
| 32,226 |
def coleman_operator(c, cp):
"""
The approximate Coleman operator.
Iteration with this operator corresponds to time iteration on the Euler
equation. Computes and returns the updated consumption policy
c. The array c is replaced with a function cf that implements
univariate linear interpolation over the asset grid for each
possible value of z.
Parameters
----------
c : array_like(float)
A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
cp : ConsumerProblem
An instance of ConsumerProblem that stores primitives
Returns
-------
array_like(float)
The updated policy, where updating is by the Coleman
operator.
"""
# === simplify names, set up arrays === #
R, Pi, beta, du, b = cp.R, cp.Pi, cp.beta, cp.du, cp.b
asset_grid, z_vals = cp.asset_grid, cp.z_vals
z_size = len(z_vals)
gamma = R * beta
vals = np.empty(z_size)
# === linear interpolation to get consumption function === #
def cf(a):
"""
The call cf(a) returns an array containing the values c(a,
z) for each z in z_vals. For each such z, the value c(a, z)
is constructed by univariate linear approximation over asset
space, based on the values in the array c
"""
for i in range(z_size):
vals[i] = np.interp(a, asset_grid, c[:, i])
return vals
# === solve for root to get Kc === #
Kc = np.empty(c.shape)
for i_a, a in enumerate(asset_grid):
for i_z, z in enumerate(z_vals):
def h(t):
expectation = np.dot(du(cf(R * a + z - t)), Pi[i_z, :])
return du(t) - max(gamma * expectation, du(R * a + z + b))
Kc[i_a, i_z] = brentq(h, 1e-8, R * a + z + b)
return Kc
|
dee76b425b5a81799fd1677f2b9ca9889f4a813c
| 32,227 |
def generate_scanset_metadata( image_set_dictionary, html_base_path, session_id ):
"""This is passed a set of NII images, their PNG equilvalents, and an html base path, and then it generates the metadata needed """
cur_subj_info = {}
"""need to think through the data structure a bit more.... but can always adjust later """
cur_subj_info['session_id'] = session_id
#cur_subj_info['img_id'] = counter
cur_subj_info['subject_id'] = session_id.split('/')[0]
global counter
nii_image_dict = image_set_dictionary['nii_images']
png_image_dict = image_set_dictionary['png_image_set']
scan_metadata = {}
for scan in nii_image_dict:
print "propcessing ", scan
nii_img = nii_image_dict[scan]['base_image'][0]
print nii_img
# if 'mask' not in scan:
# if 'mask' not in scan:
# nii_img = nii_image_dict[scan]['base_image'][0]
# else:
# continue
# print "HI DAVE!"
if not nii_img:
print "did not find base image for",nii_image_dict
continue
png_img = html_path_root+ png_image_dict[scan]
print nii_img,"is being passed"
(dim_x, dim_y, dim_z, vox_size_x, vox_size_y, vox_size_z, image_orientation )= igcf.get_nii_image_info(nii_img)
image_info = Image.open(png_img)
width, height = image_info.size
#print width,height,dim_x,dim_y,dim_z,vox_size_x,vox_size_y,vox_size_z
scan_info = {}
scan_info['slice_width'] = dim_x
scan_info['slice_height'] = dim_y
scan_info['num_slices'] = dim_z
scan_info['main_image_width'] = width
scan_info['main_image_height'] = height
scan_info['nii_image'] = nii_img
scan_info['base_url'] = png_img.replace(html_path_root,'')
scan_metadata[scan] = scan_info
### There can be one or MORE masks for a given base image... so I will return a list of
#dictionaries..
mask_list = nii_image_dict[scan]['masks']
mask_id = 0
mask_info_list = []
for mask in mask_list:
cur_mask_info = {}
            ### I'll call the mask by its basename
            print(mask, "was passed...")
            mask_base = os.path.basename(mask)
            nii_img = nii_image_dict[scan]['masks'][mask_id]
            print(nii_image_dict, 'mask_id is', mask_id)
            print("nii image found should be", nii_img)
            if not nii_img:
                print("did not find a valid mask image for", nii_image_dict)
continue
cur_mask_info['name'] = mask_base
cur_mask_info['id'] = mask_id
cur_mask_info['nii_file'] = nii_img
            print(png_image_dict)
            png_img = html_path_root + png_image_dict[scan]
            print(nii_img, "is being passed")
            cur_mask_info['mask_url'] = png_img.replace(html_path_root, '')
            mask_info_list.append(cur_mask_info)
            mask_id += 1
        cur_subj_info['masks'] = mask_info_list
        scan_metadata[scan]['masks'] = [mask_info_list]
cur_subj_info['image_data'] = scan_metadata
counter += 1
return { 'session_name': session_id , 'session_metadata': cur_subj_info }
|
4fa326018fc64f9ef2f7974d850256fdfa30f8f6
| 32,228 |
def read_error_codes(src_root='src/mongo'):
"""Define callback, call parse_source_files() with callback, save matches to global codes list."""
seen = {}
errors = []
dups = defaultdict(list)
skips = []
malformed = [] # type: ignore
# define validation callbacks
def check_dups(assert_loc):
"""Check for duplicates."""
codes.append(assert_loc)
code = assert_loc.code
if not code in seen:
seen[code] = assert_loc
else:
if not code in dups:
# on first duplicate, add original to dups, errors
dups[code].append(seen[code])
errors.append(seen[code])
dups[code].append(assert_loc)
errors.append(assert_loc)
def validate_code(assert_loc):
"""Check for malformed codes."""
code = int(assert_loc.code)
if code > MAXIMUM_CODE:
malformed.append(assert_loc)
errors.append(assert_loc)
def callback(assert_loc):
validate_code(assert_loc)
check_dups(assert_loc)
parse_source_files(callback, src_root)
if "0" in seen:
code = "0"
bad = seen[code]
errors.append(bad)
line, col = get_line_and_column_for_position(bad)
print("ZERO_CODE:")
print(" %s:%d:%d:%s" % (bad.sourceFile, line, col, bad.lines))
for loc in skips:
line, col = get_line_and_column_for_position(loc)
print("EXCESSIVE SKIPPING OF ERROR CODES:")
print(" %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
for code, locations in list(dups.items()):
print("DUPLICATE IDS: %s" % code)
for loc in locations:
line, col = get_line_and_column_for_position(loc)
print(" %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
for loc in malformed:
line, col = get_line_and_column_for_position(loc)
print("MALFORMED ID: %s" % loc.code)
print(" %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
return (codes, errors, seen)
|
46f64798fd3e7010a96e054600557464cf99eade
| 32,229 |
def filter_check_vlan_number(value):
"""
Function to check for a good VLAN number in a template
:param value:
:return:
"""
error = f'{value} !!!! possible error the VLAN# should be between 1 and 4096!!!!'
if not value: # pylint: disable=no-else-return
J2_FILTER_LOGGER.info('filter_check_vlan_number %s', error)
return error
else:
try:
if int(value) not in range(1, 4097): # pylint: disable=no-else-return
return error
else:
return value
except ValueError as e: # pylint: disable=invalid-name
J2_FILTER_LOGGER.info('filter_check_vlan_number %s, caught %s', error, e)
return error
|
6c9e060b13f49048f056b72a6def2d1d15241a74
| 32,230 |
def _sanitize(element) -> Gst.Element:
"""
Passthrough function which sure element is not `None`
Returns `Gst.Element` or raises Error
"""
if element is None:
raise Exception("Element is none!")
else:
return element
|
f07062474dcf2671cb1c3d13a7e80d9ee96b9878
| 32,231 |
import pytz
def mean(dt_list):
"""
.. py:function:: mean(dt_list)
Returns the mean datetime from an Iterable collection of datetime objects.
    Collection can be all naive datetime objects or all datetime objects with tz
(if non-naive datetimes are provided, result will be cast to UTC).
However, collection cannot be a mix of naive and non-naive datetimes.
Can handle micro-second level datetime differences. Can handle Collection of
datetime objects with different timezones. Works with lists or pandas.Series.
:param collection.Iterable dt_list: Iterable list or Series of datetime objects
:return: mean datetime
:rtype: datetime.datetime
:raises TypeError: if operand is not type Iterable or
if operand contains naive and non-naive datetime objects or
if result is not type datetime.datetime
"""
try:
list_size = len(dt_list)
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
if list_size == 1:
mean_dt = dt_list[0]
elif (list_size == 2) and (dt_list[0] == dt_list[1]):
mean_dt = dt_list[0]
else:
try:
if dt_list[0].tzinfo:
base_dt = dt.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
else:
base_dt = dt.datetime(1970, 1, 1)
delta_total = 0
for item in dt_list:
delta_total += (item - base_dt).total_seconds()
delta = delta_total / float(list_size)
mean_dt = base_dt + dt.timedelta(seconds=delta)
except TypeError:
raise TypeError(_OPER_ERR_MSG + str(dt_list))
except IndexError:
raise IndexError(_LEN_ERR_MSG)
return validate_dt(mean_dt)
|
2d56eeea44d2afbf752672abb6870d7045745a0f
| 32,232 |
from typing import Optional
from typing import Dict
def win_get_nonblocking(name: str, src_weights: Optional[Dict[int, float]] = None,
require_mutex: bool = False) -> int:
""" Passively get the tensor(s) from neighbors' shared window memory into
local shared memory, which cannot be accessed in python directly.
    The win_update function is responsible for fetching that memory.
This is a non-blocking function, which will return without waiting the
win_get operation is really finished.
Args:
name: The unique name to associate the window object.
src_weights: A dictionary that maps the source ranks to the weight.
            Namely, {rank: weight} means get the tensor from that rank neighbor, multiplied by the weight.
If not provided, src_weights will be set as all neighbor ranks defined by
virtual topology with weight 1.0.
            Note that src_weights should only contain in-neighbors.
require_mutex: If set to be true, out-neighbor process's window mutex will be
acquired.
Returns:
A handle to the win_get operation that can be used with `win_poll()` or
`win_wait()`.
"""
function = "bluefog_torch_win_get"
src_weights = ({rank: 1.0 for rank in in_neighbor_ranks()}
if src_weights is None else src_weights)
if not set(src_weights.keys()).issubset(set(in_neighbor_ranks())):
raise ValueError(
"The key of src_weights should only containranks that "
" belong to in-neighbors.")
handle = getattr(mpi_lib, function)(name, src_weights, require_mutex)
_win_handle_map[handle] = name
return handle
|
a641f963ac3434ece7ded8a642c7833fc8a2b30c
| 32,233 |
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a file into an ElemenTree using the BeautifulSoup parser.
You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
if not hasattr(file, 'read'):
file = open(file)
root = _parse(file, beautifulsoup, makeelement, **bsargs)
return etree.ElementTree(root)
|
5ccf2bfc8f1d6ec4f83200b250755ab149fd60dd
| 32,234 |
def get_L_dashdash_b1_d(L_dashdash_b1_d_t):
"""
Args:
        L_dashdash_b1_d_t: Hourly solar-heat-corrected hot water supply load when filling the bathtub from the faucet (MJ/h)
    Returns:
        Daily solar-heat-corrected hot water supply load when filling the bathtub from the faucet (MJ/d)
"""
return np.sum(L_dashdash_b1_d_t.reshape((365, 24)), axis=1)
|
aa541c5f82aa94c33c65ac264f2df420020ca443
| 32,235 |
def split_df(df, index_range, columns, iloc=False):
"""Split a data frame by selecting from columns a particular range.
Args:
df (:class:`pd.DataFrame`): Data frame to split.
index_range (tuple): Tuple containing lower and upper limit of the
range to split the index by. If `index_range = (a, b)`, then
`[a, b)` is taken.
columns (list[object]): Columns to select.
iloc (bool, optional): The index range is the integer location instead
of the index value. Defaults to `False`.
Returns:
tuple[:class:`pd.DataFrame`]: Selected rows from selected columns
and the remainder.
"""
if iloc:
inds = np.arange(df.shape[0])
rows = (inds >= index_range[0]) & (inds < index_range[1])
else:
rows = (df.index >= index_range[0]) & (df.index < index_range[1])
selected = pd.DataFrame([df[name][rows] for name in columns]).T
remainder = pd.DataFrame(
[df[name][~rows] for name in columns]
+ [df[name] for name in set(df.columns) - set(columns)]
).T
# Fix order of columns.
selected_inds = [i for i, c in enumerate(df.columns) if c in columns]
selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1)
remainder = remainder.reindex(df.columns, axis=1)
return selected, remainder
|
84e77e60a0f9c73ff3147c3648310875e5b58228
| 32,236 |
def basemap_to_tiles(basemap, day=yesterday, **kwargs):
"""Turn a basemap into a TileLayer object.
Parameters
----------
basemap : class:`xyzservices.lib.TileProvider` or Dict
Basemap description coming from ipyleaflet.basemaps.
day: string
If relevant for the chosen basemap, you can specify the day for
the tiles in the "%Y-%m-%d" format. Defaults to yesterday's date.
kwargs: key-word arguments
Extra key-word arguments to pass to the TileLayer constructor.
"""
if isinstance(basemap, xyzservices.lib.TileProvider):
url = basemap.build_url(time=day)
elif isinstance(basemap, dict):
url = basemap.get("url", "")
else:
raise ValueError("Invalid basemap type")
return TileLayer(
url=url,
max_zoom=basemap.get('max_zoom', 18),
min_zoom=basemap.get('min_zoom', 1),
attribution=basemap.get('html_attribution', '') or basemap.get('attribution', ''),
name=basemap.get('name', ''),
**kwargs
)
|
ccaf3430294216e7015167dad3ef82bee8071192
| 32,237 |
def sms_count(request):
"""Return count of SMSs in Inbox"""
sms_count = Messaging.objects.filter(hl_status__exact='Inbox').count()
sms_count = sms_count if sms_count else ""
return HttpResponse(sms_count)
|
c445b7c5fd54f632fc6f7c3d0deaeca47c1dd382
| 32,239 |
from pathlib import Path
import yaml
def deserializer(file_name: Path) -> Deserializer:
"""Load and parse the data deserialize declaration"""
with open(file_name) as f:
return Deserializer(yaml.load(f, Loader=SafeLoader))
|
5df5de579e359e7d1658dd00cf279baacb844f1f
| 32,240 |
def p2db(a):
"""Returns decibel of power ratio"""
return 10.0*np.log10(a)
|
5177d9ca5ca0ec749e64ebf3e704cf496fa365db
| 32,242 |
def buildDictionary(message):
"""
counts the occurrence of every symbol in the message and store it in a python dictionary
parameter:
message: input message string
return:
python dictionary, key = symbol, value = occurrence
"""
_dict = dict()
for c in message:
if c not in _dict.keys():
_dict[c] = 1
else:
_dict[c] += 1
return _dict
|
71b196aaccfb47606ac12242585af4ea2554a983
| 32,243 |
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import tensorflow.keras.backend as be
def model_fit(mb_query: str, features_dict: dict, target_var: str, model_struct_fn, get_model_sample_fn,
existing_models: dict, batch_size: int, epochs: int, patience: int, verbose: int,
bias_query: str, model_in: str, model_out: str, out_tensorboard: str, lr: float, iter: int,
model_save_dir: str, model_columns: list, target_values: list):
"""
Fits a Keras model. Self-contained with the idea that it is called as a new process.
:param mb_query: query to get the model-build data
:param features_dict: dict of features used to build the model structure
:param target_var: name of the field that's the dependent variable
:param model_struct_fn: function that builds the model structure
:param get_model_sample_fn: function that retrieves the model-build data
:param existing_models: dict of existing models to run and add to the model-build DataFrame
:param batch_size: batch size for model build
:param epochs: # of epochs to run
:param patience: patience in waiting to see if validation metric does not improve
:param verbose: verbosity of .fit (0=quiet, 1=not)
:param bias_query: query to calculate initial bias of output layer
:param model_in: location of the model (for a warm start)
:param model_out: location to store the model
:param out_tensorboard: location of tensorboard output
:param lr: learning rate
:param iter: iteration we're on (for saving the model)
:param model_save_dir: where to put the .h5 file
:param model_columns: columns of .predict output we're interested in for plotting
:param target_values: values of the target feature that correspond to model_columns
:return: history dict
"""
#from muti import tfu commented out 5/1
# model
if model_in != '':
mod = tf.keras.models.load_model(model_in)
be.set_value(mod.optimizer.lr, lr)
else:
bias, p_df = dq_get_bias(bias_query)
mod = model_struct_fn(features_dict, learning_rate=lr, output_bias=bias)
print(mod.summary())
# callbacks
model_ckpt = ModelCheckpoint(model_out, monitor='val_loss', save_best_only=True)
tensorboard = TensorBoard(
log_dir=out_tensorboard,
histogram_freq=1,
write_images=True,
embeddings_freq=100
)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
verbose=1,
patience=patience,
mode='auto',
restore_best_weights=True)
print('getting data')
data_df = get_model_sample_fn(mb_query, existing_models)
model_df = data_df.loc[data_df['holdout'] == 0].copy()
valid_df = data_df.loc[data_df['holdout'] == 1].copy()
print('modeling data set size: {0}'.format(model_df.shape[0]))
print('validation data set size: {0}'.format(valid_df.shape[0]))
steps_per_epoch = int(model_df.shape[0] / batch_size)
model_ds = get_tf_dataset(features_dict, target_var, model_df, batch_size)
valid_ds = get_tf_dataset(features_dict, target_var, valid_df, batch_size, repeats=1)
print('starting fit')
h = mod.fit(model_ds, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose,
callbacks=[tensorboard, model_ckpt, early_stopping], validation_data=valid_ds)
save_file = model_save_dir + 'model' + str(iter) + '.h5'
mod.save(save_file, overwrite=True, save_format='h5')
model_output = mod.predict(valid_ds)
valid_df['model'] = get_pred(model_output, model_columns)
valid_df['actual'] = valid_df[target_var].isin(target_values).astype(int)
title = 'Validation KS<br>After {0} epochs'.format((iter + 1) * epochs)
genu.ks_calculate(valid_df['model'], valid_df['actual'], in_browser=True, plot=True, title=title)
title = 'Validation Decile Plot<br>After {0} epochs'.format((iter + 1) * epochs)
genu.decile_plot(valid_df['model'], valid_df['actual'], title=title, in_browser=True)
return h.history
|
add35320ef1d9f6474f3712f3222d9a5fdbb3185
| 32,245 |
def classify_subtrop(storm_type):
"""
SD purely - yes
SD then SS then TS - no
SD then TS - no
"""
    if 'SD' in storm_type:
        # Purely subtropical only if the system never reached a tropical stage.
        if True not in np.isin(storm_type, ['TD', 'TS', 'HU']):
            return True
    return False
|
abfc8e002e798e5642e2ab4ae38fe0882259d708
| 32,246 |
def overridden_settings(settings):
"""Return a dict of the settings that have been overridden"""
settings = Settings(settings)
for name, dft_value in iter_default_settings():
value = settings[name]
if value != dft_value and value is not None:
settings.update(name, value)
elif value is None:
settings.update(name, dft_value)
return settings
|
ec76feb90dbc97012f84f9ebc75b41131dc925fe
| 32,247 |
def ScaleImageToSize(ip, width, height):
"""Scale image to a specific size using Stephans scaler"""
    smaller = ip.scale(width, height)
    return smaller
|
9e2ee47ab30bfca70417eafbddd84958cd582618
| 32,248 |
import types
def retrieve_parent(*, schema: types.Schema, schemas: types.Schemas) -> str:
"""
Get or check the name of the parent.
If x-inherits is True, get the name of the parent. If it is a string, check the
parent.
Raise InheritanceError if x-inherits is not defined or False.
Args:
schema: The schema to retrieve the parent for.
schemas: All the schemas.
Returns:
The parent.
"""
inherits = peek_helper.inherits(schema=schema, schemas=schemas)
if inherits is True:
return get_parent(schema=schema, schemas=schemas)
if isinstance(inherits, str):
if not check_parent(schema=schema, parent_name=inherits, schemas=schemas):
raise exceptions.InheritanceError(
f"The x-inherits value {inherits} is not a valid parent."
)
return inherits
raise exceptions.InheritanceError(
"Cannot retrieve the name of the parent if x-inherits is not defined or False."
)
|
4f6fc55af7b998e02b108d1bc5fea61f2afe82f1
| 32,249 |
from .translation.vensim.vensim2py import translate_vensim
def read_vensim(mdl_file, data_files=None, initialize=True,
missing_values="warning", split_views=False,
encoding=None, **kwargs):
"""
Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : str
The relative path filename for a raw Vensim `.mdl` file.
initialize: bool (optional)
        If False, the model will not be initialized when it is loaded.
Default is True.
data_files: list or str or None (optional)
If given the list of files where the necessary data to run the model
is given. Default is None.
missing_values : str ("warning", "error", "ignore", "keep") (optional)
What to do with missing values. If "warning" (default)
shows a warning message and interpolates the values.
If "raise" raises an error. If "ignore" interpolates
the values without showing anything. If "keep" it will keep
the missing values, this option may cause the integration to
fail, but it may be used to check the quality of the data.
split_views: bool (optional)
If True, the sketch is parsed to detect model elements in each
model view, and then translate each view in a separate python
file. Setting this argument to True is recommended for large
models split in many different views. Default is False.
encoding: str or None (optional)
Encoding of the source model file. If None, the encoding will be
read from the model, if the encoding is not defined in the model
file it will be set to 'UTF-8'. Default is None.
**kwargs: (optional)
Additional keyword arguments for translation.
subview_sep: list
Characters used to separate views and subviews (e.g. [",", "."]).
If provided, and split_views=True, each submodule will be placed
inside the directory of the parent view.
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class
and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
"""
py_model_file = translate_vensim(mdl_file, split_views, encoding, **kwargs)
model = load(py_model_file, data_files, initialize, missing_values)
model.mdl_file = str(mdl_file)
return model
|
28d062ebb234cf991dcef164d5151e1ab62e08f7
| 32,250 |
import typing
def get_feature_importance(
trained_pipeline: sklearn.pipeline.Pipeline,
numeric_features: typing.List[str]
) -> pd.Series:
"""
Get feature importance measures from a trained model.
Args:
trained_pipeline (:obj:`sklearn.pipeline.Pipeline`): Fitted model pipeline
numeric_features (list(str)): Names of numeric features
Returns:
:obj:`pandas.Series` containing each feature and its importance
"""
# Retrieve categorical features from the one-hot encoder
# (numeric features need to be passed in manually)
categorical_features = list(trained_pipeline["preprocessor"]
.transformers_[1][1]
.get_feature_names())
features = numeric_features + categorical_features
# Fetch importance values (without labels) from the model itself
importances = trained_pipeline["predictor"].feature_importances_
return pd.Series(data=importances, index=features)
|
cd303af5a0b343a18fb42a3cd562998ecec96423
| 32,251 |
from typing import Any
import json
def json_loads(json_text: str) -> Any:
"""Does the same as json.loads, but with some additional validation."""
try:
json_data = json.loads(json_text)
validate_all_strings(json_data)
return json_data
except json.decoder.JSONDecodeError:
raise _jwt_error.JwtInvalidError('Failed to parse JSON string')
except RecursionError:
raise _jwt_error.JwtInvalidError(
'Failed to parse JSON string, too many recursions')
except UnicodeEncodeError:
raise _jwt_error.JwtInvalidError('invalid character')
|
d123054612a0a3e29f312e1506181ca3f9bed219
| 32,252 |
def weights(layer, expected_layer_name):
"""
Return the kernels/weights and bias from the VGG model for a given layer.
"""
W = vgg_layers[0][layer][0][0][2][0][0]
b = vgg_layers[0][layer][0][0][2][0][1]
layer_name = vgg_layers[0][layer][0][0][0][0]
#to check we obtained the correct layer from the vgg model
assert layer_name == expected_layer_name
return W, b
|
5271f932bd9a870bd7857db50632cd51d91b60a9
| 32,253 |
import textwrap
def alert(title: str, text: str, *, level: str = "warning", ID: str = None):
"""
Generate the HTML to display a banner that can be permanently hidden
This is used to inform player of important changes in updates.
Arguments:
text: Main text of the banner
title: Title of the banner
type: On of "warning", "info". The aspect of the banner
ID: optional string ID of this banner, if you need to check if it is
open/closed somewhere. Do NOT use numbers
"""
if not level in ("info", "warning"):
raise ValueError("Level must be among 'info', 'warning'")
if alert.has_disable_been_called:
raise RuntimeError(
"The function alert() is called after disable_old_alert() has generated "
"the javascript code to handle hidding closed alerts. This breaks the "
"system completely, make sure disable_old_alerts is called last"
)
if ID is None:
alert_id = alert.numid
alert.numid += 1
else:
alert_id = str(ID)
alert.strid.append(alert_id)
indent = " " * 4 * 4
text = str(text).replace("\n", "\n" + indent)
return textwrap.dedent(
f"""\
<input type="hidden" class="alert-hidder" name="attr_alert-{alert_id}" value="0"/>
<div class="alert alert-{level}">
<div>
<h3> {level.title()} - {title}</h3>
{text}
</div>
<label class="fakebutton">
<input type="checkbox" name="attr_alert-{alert_id}" value="1" /> ×
</label>
</div>"""
)
|
90ff85c228dc70318deee196bdd512e5be90a5ad
| 32,254 |
def get_stim_data_df(sessions, analyspar, stimpar, stim_data_df=None,
comp_sess=[1, 3], datatype="rel_unexp_resp", rel_sess=1,
basepar=None, idxpar=None, abs_usi=True, parallel=False):
"""
get_stim_data_df(sessions, analyspar, stimpar)
Returns dataframe with relative ROI data for one session relative
to another, for each line/plane.
Required args:
- sessions (list):
session objects
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
Optional args:
- stim_data_df (pd.DataFrame):
dataframe with one row per line/plane, and the basic sess_df
columns
default: None
        - comp_sess (list):
sessions for which to obtain absolute fractional change
[x, y] => |(y - x) / x|
default: [1, 3]
- datatype (str):
type of data to retrieve
default: "rel_unexp_resp"
- rel_sess (int):
number of session relative to which data should be scaled, for each
mouse
default: 1
- basepar (BasePar):
named tuple containing baseline parameters
(needed if datatype is "usis")
default: None
- idxpar (IdxPar):
named tuple containing index parameters
(needed if datatype is "usis")
default: None
- abs_usi (bool):
if True, absolute USIs are returned (applies if datatype is "usis")
default: True
- parallel (bool):
if True, some of the analysis is run in parallel across CPU cores
default: False
Returns:
- stim_data_df (pd.DataFrame):
dataframe with one row per line/plane, and the basic sess_df
columns, as well as stimulus columns for each comp_sess:
- {stimpar.stimtype}_s{comp_sess[0]}:
first comp_sess data for each ROI
- {stimpar.stimtype}_s{comp_sess[1]}:
second comp_sess data for each ROI
"""
data_df = collect_base_data(
sessions, analyspar, stimpar, datatype=datatype, rel_sess=rel_sess,
basepar=basepar, idxpar=idxpar, abs_usi=abs_usi, parallel=parallel
)
stim_data_df = check_init_stim_data_df(
data_df, sessions, stimpar, stim_data_df=stim_data_df,
analyspar=analyspar
)
# populate dataframe
group_columns = ["lines", "planes"]
for grp_vals, grp_df in data_df.groupby(group_columns):
grp_df = grp_df.sort_values(["sess_ns", "mouse_ns"])
line, plane = grp_vals
row_idxs = stim_data_df.loc[
(stim_data_df["lines"] == line) & (stim_data_df["planes"] == plane)
].index
if len(row_idxs) != 1:
raise ValueError("Expected exactly one row to match line/plane.")
row_idx = row_idxs[0]
sess_ns = sorted(grp_df["sess_ns"].unique())
for sess_n in comp_sess:
if int(sess_n) not in sess_ns:
raise RuntimeError(f"Session {sess_n} missing in grp_df.")
# obtain comparison data
comp_data = [[], []]
for mouse_n in sorted(grp_df["mouse_ns"].unique()):
mouse_loc = (grp_df["mouse_ns"] == mouse_n)
for i in range(2):
sess_loc = (grp_df["sess_ns"] == comp_sess[i])
data_row = grp_df.loc[mouse_loc & sess_loc]
if len(data_row) != 1:
raise RuntimeError("Expected to find exactly one row")
# retrieve ROI data
data = data_row.loc[data_row.index[0], datatype]
comp_data[i].append(data)
# add data for each session to dataframe
for n, data in zip(comp_sess, comp_data):
stim_data_df.loc[row_idx, f"{stimpar.stimtype}_s{n}"] = \
np.concatenate(data)
return stim_data_df
|
5a352a66ad06ed70b04db3ca3e26073fb412cccd
| 32,255 |