content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def path_graph(n, create_using=None):
"""Returns the Path graph `P_n` of linearly connected nodes.
Parameters
----------
n : int or iterable
If an integer, node labels are 0 to n with center 0.
If an iterable of nodes, the center is the first.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
"""
    n_name, nodes = n  # assumes n was pre-processed into a (name, nodes) pair, e.g. by nx.utils.nodes_or_number
G = empty_graph(nodes, create_using)
G.add_edges_from(pairwise(nodes))
return G
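# Usage sketch (not part of the original snippet). It assumes `empty_graph` and
# `pairwise` come from NetworkX and that `n` has already been converted to a
# (name, nodes) pair, as the nx.utils.nodes_or_number decorator would do:
G = path_graph((4, range(4)))
sorted(G.edges())   # [(0, 1), (1, 2), (2, 3)]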
|
53eabfd815b3c37f6b7e8e8e54e1a0357c0fcca9
| 31,113 |
def numpy_shift(array: np.ndarray, periods: int, axis: int, fill_value=np.nan) -> np.ndarray:
"""
numpy implementation for validation
"""
assert axis in range(-array.ndim, array.ndim)
copy_src_indices = [slice(None)] * array.ndim
copy_dst_indices = [slice(None)] * array.ndim
fill_indices = [slice(None)] * array.ndim
if periods > 0:
fill_indices[axis] = slice(None, periods)
copy_src_indices[axis] = slice(None, -periods)
copy_dst_indices[axis] = slice(periods, None)
elif periods < 0:
fill_indices[axis] = slice(periods, None)
copy_src_indices[axis] = slice(-periods, None)
copy_dst_indices[axis] = slice(None, periods)
else:
return array.copy()
result = np.empty_like(array)
result[tuple(fill_indices)] = fill_value
result[tuple(copy_dst_indices)] = array[tuple(copy_src_indices)]
return result
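# Usage sketch (not part of the original snippet): shift rows of a 2-D array
# down by one position, filling the vacated row with NaN.
import numpy as np
a = np.arange(6, dtype=float).reshape(3, 2)
shifted = numpy_shift(a, periods=1, axis=0)
# shifted[0] is all NaN; shifted[1:] equals a[:2]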
|
3ffc61a61abc5bdc3547822c0715a81efb9d0b4d
| 31,114 |
import cv2
def draw_rect(im, cords, color = None):
"""Draw the rectangle on the image
Parameters
----------
im : numpy.ndarray
numpy image
cords: numpy.ndarray
Numpy array containing bounding boxes of shape `N X 4` where N is the
number of bounding boxes and the bounding boxes are represented in the
        format `x1 y1 x2 y2`
    color : list, optional
        Border color as an RGB/BGR triple; defaults to white ([255, 255, 255])
Returns
-------
numpy.ndarray
numpy image with bounding boxes drawn on it
"""
im = im.copy()
cords = cords[:,:4]
cords = cords.reshape(-1,4)
if not color:
color = [255,255,255]
for cord in cords:
pt1, pt2 = (cord[0], cord[1]) , (cord[2], cord[3])
pt1 = int(pt1[0]), int(pt1[1])
pt2 = int(pt2[0]), int(pt2[1])
im = cv2.rectangle(im.copy(), pt1, pt2, color, int(max(im.shape[:2])/200))
return im
|
02c52166f0f9c25d9f9ad61f7083d7ee733759aa
| 31,115 |
from typing import List
def _setup_entities(hass, dev_ids: List[str]):
"""Set up CAME light device."""
manager = hass.data[DOMAIN][CONF_MANAGER] # type: CameManager
entities = []
for dev_id in dev_ids:
device = manager.get_device_by_id(dev_id)
if device is None:
continue
entities.append(CameLightEntity(device))
return entities
|
5c9deb213b5df197aaf075f62a68c1472951a3e0
| 31,116 |
def check_doa(geometry, doa, online=False):
"""
Check value of the DoA
"""
if not online:
doas = [doa]
else:
doas = doa
for doa in doas:
if doa < 0:
return False
if geometry == "linear" and doa > 180:
return False
if geometry == "circular" and doa >= 360:
return False
return True
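# Usage sketch (not part of the original snippet), illustrating the bounds:
check_doa("linear", 90)                          # True: within [0, 180]
check_doa("linear", 270)                         # False: a linear array only resolves up to 180 degrees
check_doa("circular", [10, 350], online=True)    # True: every DoA in the list lies in [0, 360)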
|
ae2be5c478968358a9edf6e7da63e586af39eed8
| 31,118 |
def _trim_orderbook_list(arr: list, ascending: bool, limit_len: int = 50) -> list:
"""trims prices up to 4 digits precision"""
first_price = arr[0][0]
if first_price < 0.1:
unit = 1e-5
elif first_price < 1:
unit = 1e-4
elif first_price < 10:
unit = 1e-3
elif first_price < 100:
unit = 1e-2
elif first_price < 1000:
unit = 1e-1
elif first_price < 10000:
unit = 1
else:
unit = 10
trimmed_price = jh.orderbook_trim_price(first_price, ascending, unit)
temp_qty = 0
trimmed_arr = []
for a in arr:
if len(trimmed_arr) == limit_len:
break
if (ascending and a[0] > trimmed_price) or (not ascending and a[0] < trimmed_price):
# add previous record
trimmed_arr.append([
trimmed_price, temp_qty
])
# update temp values
temp_qty = a[1]
trimmed_price = jh.orderbook_trim_price(a[0], ascending, unit)
else:
temp_qty += a[1]
return trimmed_arr
|
557dceab9b11836b2f884ebc40d1e66769f5eb4c
| 31,121 |
from corpkit.process import make_name_to_query_dict
def process_piece(piece, op='=', quant=False, **kwargs):
"""
Make a single search obj and value
"""
if op not in piece:
return False, False
translator = make_name_to_query_dict()
target, criteria = piece.split(op, 1)
criteria = criteria.strip('"')
criteria = remake_special(criteria, **kwargs)
if '-' in target:
obj, show = target.split('-', 1)
show = show.lower()
if show == 'deprel':
show = 'Function'
elif show == 'pos':
show = 'POS'
form = '{} {}'.format(obj.title(), show.lower())
else:
form = target.title()
if form == 'Deprel':
form = 'Function'
elif form == 'Pos':
form = 'POS'
return translator.get(form), criteria
|
2d79455fcc27e0b3b9a81209cdc28089a7b73f5a
| 31,122 |
import random
def normal220(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
"""
for source and destination id generation
"""
"""
for type of banking work,label of fraud and type of fraud
"""
idvariz=random.choice(bb1)
idgirande=random.choice(zz2)
first.append("transfer")
second.append(idvariz)
third.append(idgirande)
sixth.append("0")
seventh.append("none")
"""
for amount of money generation
"""
numberofmoney=random.randrange(50000,money2)
forth.append(numberofmoney)
"""
for date and time generation randomly between two dates
"""
final=randomDate(startt,endt, random.random())
fifth.append(final)
return (first,second,third,forth,fifth,sixth,seventh)
|
d83408e8d5ec18d0a6de2aee62ca12aa5f1561b8
| 31,123 |
def sigma_voigt(dgm_sigmaD,dgm_gammaL):
"""compute sigma of the Voigt profile
Args:
dgm_sigmaD: DIT grid matrix for sigmaD
dgm_gammaL: DIT grid matrix for gammaL
Returns:
sigma
"""
fac=2.*np.sqrt(2.*np.log(2.0))
fdgm_gammaL=jnp.min(dgm_gammaL,axis=1)*2.0
fdgm_sigmaD=jnp.min(dgm_sigmaD,axis=1)*fac
fv=jnp.min(0.5346*fdgm_gammaL+jnp.sqrt(0.2166*fdgm_gammaL**2+fdgm_sigmaD**2))
sigma=fv/fac
return sigma
|
27581dc61e04d2f5b1411cb64093e03378bb8297
| 31,124 |
def zigzag2(i, curr=.45, upper=.48, lower=.13):
"""
Generalized version of the zig-zag function.
Returns points oscillating between two bounds
linearly.
"""
if abs(i) <= (upper-curr):
return curr + i
else:
i = i - (upper-curr)
i = i%(2*(upper-lower))
if i < (upper-lower):
return upper-i
else:
return 2*lower-upper+i
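# Usage sketch (not part of the original snippet): with the defaults the output
# starts at curr=0.45, rises to upper=0.48, then zig-zags between 0.48 and 0.13.
zigzag2(0.0)    # 0.45
zigzag2(0.03)   # 0.48 (hits the upper bound)
zigzag2(0.38)   # ~0.13 (reaches the lower bound before climbing back up)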
|
a51624af520121eb7285b2a8a5b4dc5ffa552147
| 31,125 |
def get_len_of_range(start, stop, step):
"""Get the length of a (start, stop, step) range."""
n = 0
if start < stop:
        n = ((stop - start - 1) // step + 1)
return n
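# Usage sketch (not part of the original snippet): for positive steps the result
# matches Python's built-in range.
assert get_len_of_range(0, 10, 3) == len(range(0, 10, 3))  # both are 4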
|
4c43f502efe7ad0e20a9bc7624e6d47e208a94a7
| 31,126 |
def generate_tap(laygen, objectname_pfix, placement_grid, routing_grid_m1m2_thick, devname_tap_boundary, devname_tap_body,
m=1, origin=np.array([0,0]), transform='R0'):
"""generate a tap primitive"""
pg = placement_grid
rg_m1m2_thick = routing_grid_m1m2_thick
# placement
itapbl0 = laygen.place("I" + objectname_pfix + 'BL0', devname_tap_boundary, pg, xy=origin, transform=transform)
itap0 = laygen.relplace(name = "I" + objectname_pfix + '0', templatename = devname_tap_body, gridname = pg, refinstname = itapbl0.name, shape=np.array([m, 1]), transform=transform)
itapbr0 = laygen.relplace(name = "I" + objectname_pfix + 'BR0', templatename = devname_tap_boundary, gridname = pg, refinstname = itap0.name, transform=transform)
#power route
laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=rg_m1m2_thick,
refinstname0=itap0.name, refpinname0='TAP0', refinstindex0=np.array([0, 0]),
refinstname1=itap0.name, refpinname1='TAP1', refinstindex1=np.array([m-1, 0])
)
    for i in range(int(m / 2)):
laygen.via(None, np.array([0, 0]), refinstname=itap0.name, refpinname='TAP0', refinstindex=np.array([2*i, 0]),
gridname=rg_m1m2_thick)
return [itapbl0, itap0, itapbr0]
|
59f3ceeee74e8b99690e86b671cbd1e503fe4121
| 31,127 |
from typing import Union
from typing import Optional
def multiply(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
"""Calculates the product for each element ``x1_i`` of the input array ``x1`` with
the respective element ``x2_i`` of the input array ``x2``.
**Special cases**
For floating-point operands,
- If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
- If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+0``
or ``-0``, the result is ``NaN``.
- If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+infinity`` or
``-infinity``, the result is ``NaN``.
- If ``x1_i`` and ``x2_i`` have the same mathematical sign, the result has a
positive mathematical sign, unless the result is ``NaN``. If the result is
``NaN``, the “sign” of ``NaN`` is implementation-defined.
- If ``x1_i`` and ``x2_i`` have different mathematical signs, the result has a
negative mathematical sign, unless the result is ``NaN``. If the result is
``NaN``, the “sign” of ``NaN`` is implementation-defined.
- If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either
``+infinity`` or ``-infinity``, the result is a signed infinity with the
mathematical sign determined by the rule already stated above.
- If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is a nonzero
finite number, the result is a signed infinity with the mathematical sign
determined by the rule already stated above.
- If ``x1_i`` is a nonzero finite number and ``x2_i`` is either ``+infinity`` or
``-infinity``, the result is a signed infinity with the mathematical sign
determined by the rule already stated above.
In the remaining cases, where neither ``infinity`` nor ``NaN`` is involved, the
product must be computed and rounded to the nearest representable value according to
IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to
represent, the result is an ``infinity`` of appropriate mathematical sign. If the
magnitude is too small to represent, the result is a zero of appropriate
mathematical sign.
.. note::
Floating-point multiplication is not always associative due to finite precision.
Parameters
----------
x1
first input array. Should have a numeric data type.
x2
        second input array. Must be compatible with ``x1`` (see :ref:`Broadcasting`).
Should have a numeric data type.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the element-wise products. The returned array must have a
data type determined by :ref:`Type Promotion Rules`.
"""
return _cur_backend(x1, x2).multiply(x1, x2, out)
|
53ce0b715933b59b40abd7f51d7bb104874e54db
| 31,128 |
def discriminateEvents(events, threshold):
"""
    Discriminate triggers when different kinds of events are on the same channel.
A time threshold is used to determine if two events are from the same trial.
Parameters
----------
events : instance of pandas.core.DataFrame
Dataframe containing the list of events obtained with
mne.find_events(raw).
    threshold : float
        Time threshold in milliseconds. Keeps an event if the time difference
        with the previous one is greater than threshold.
    Returns:
    events : instance of pandas.core.DataFrame
        Dataframe of the events meeting the threshold requirement.
"""
# calculate the rolling difference (between n and n+1)
events['diff'] = events[0].diff()
# replace the nan with the first value
events['diff'].iloc[0] = events.iloc[0, 0]
# select events with time distance superior to threshold
events = events[events['diff']>threshold]
events = events.reset_index(drop=True)
del events['diff']
return events
|
0078548ea463c01d88b574185b3dcb5632e5cd13
| 31,129 |
def whiten(sig, win):
"""Whiten signal, modified from MSNoise."""
npts = len(sig)
nfreq = int(npts // 2 + 1)
assert (len(win) == nfreq)
# fsr = npts / sr
fsig = fft(sig)
# hl = nfreq // 2
half = fsig[: nfreq]
half = win * phase(half)
fsig[: nfreq] = half
fsig[-nfreq + 1:] = half[1:].conjugate()[::-1]
return np.real(ifft(fsig))
|
0c6e4300d7bebc466039ee447c7f6507d211be1c
| 31,130 |
def create_slice(request):
"""
Create a slice based on a pattern. User has to be the "production_request" owner. Steps should contain dictionary of step
names which should be copied from the pattern slice as well as modified fields, e.g. {'Simul':{'container_name':'some.container.name'}}
    :param production_request: Production request ID. Required
    :param pattern_slice: Pattern slice number. Required
    :param pattern_request: Pattern request ID. Required
    :param steps: Dictionary of steps to be copied from the pattern slice. Required
    :param slice: Dictionary of parameters to be changed in a slice. Optional
"""
try:
data = request.data
production_request = TRequest.objects.get(reqid=data['production_request'])
if request.user.username != production_request.manager:
return HttpResponseForbidden()
pattern_slice = int(data['pattern_slice'])
pattern_request = int(data['pattern_request'])
steps = data['steps']
slice_dict = data.get('slice')
new_slice_number = None
if InputRequestList.objects.filter(request=production_request).count()<1000:
new_slice_number = clone_pattern_slice(production_request.reqid, pattern_request, pattern_slice, steps, slice_dict)
if new_slice_number and (production_request.cstatus not in ['test', 'approved']):
set_request_status('cron',production_request.reqid,'approved','Automatic approve by api', 'Request was automatically extended')
except Exception as e:
return HttpResponseBadRequest(e)
return Response({'slice': new_slice_number,'request':production_request.reqid})
|
c01f15b6b369d9159b02c06ce6e2bc9c63dac50e
| 31,131 |
def get_reverse_charge_recoverable_total(filters):
"""Returns the sum of the total of each Purchase invoice made with recoverable reverse charge."""
query_filters = get_filters(filters)
query_filters.append(['reverse_charge', '=', 'Y'])
query_filters.append(['recoverable_reverse_charge', '>', '0'])
query_filters.append(['docstatus', '=', 1])
try:
return frappe.db.get_all('Purchase Invoice',
filters = query_filters,
fields = ['sum(total)'],
as_list=True,
limit = 1
)[0][0] or 0
except (IndexError, TypeError):
return 0
|
95b24a90230675ffda80760d073705f5da823bc8
| 31,132 |
import hashlib
def load_data(url_bam):
"""Load ``MetaData`` from the given ``url_bam``."""
if url_bam.scheme != "file":
raise ExcovisException("Can only load file resources at the moment")
with pysam.AlignmentFile(url_bam.path, "rb") as samfile:
read_groups = samfile.header.as_dict().get("RG", [])
if len(read_groups) != 1:
raise ExcovisException("Must have one read group per BAM file!")
sample = read_groups[0].get("SM", fs.path.basename(url_bam.path[: -len(".bam")]))
hash = hashlib.sha256(url_bam.path.encode("utf-8")).hexdigest()
return MetaData(id=hash, path=url_bam.path, sample=strip_sample(sample))
|
e5c17b6ad236ebcbd4222bbe5e3cbdb923965325
| 31,133 |
import random
import string
def generate_room_number():
"""
Generates a room number composed of 10 digits.
"""
return "".join(random.sample(string.digits, 10))
|
133e7463106df89fb68de6a7dfa7c718bc1bc5ba
| 31,134 |
def is_noiseless(model: Model) -> bool:
"""Check if a given (single-task) botorch model is noiseless"""
if isinstance(model, ModelListGP):
raise ModelError(
"Checking for noisless models only applies to sub-models of ModelListGP"
)
return model.__class__ in NOISELESS_MODELS
|
d1b5cfee5713cc44d40a7d0d98542d0d2147a9a3
| 31,135 |
def random_sampling_normalized_variance(sampling_percentages, indepvars, depvars, depvar_names,
n_sample_iterations=1, verbose=True, npts_bandwidth=25, min_bandwidth=None,
max_bandwidth=None, bandwidth_values=None, scale_unit_box=True, n_threads=None):
"""
Compute the normalized variance derivatives :math:`\\hat{\\mathcal{D}}(\\sigma)` for random samples of the provided
data specified using ``sampling_percentages``. These will be averaged over ``n_sample_iterations`` iterations. Analyzing
the shift in peaks of :math:`\\hat{\\mathcal{D}}(\\sigma)` due to sampling can distinguish between characteristic
features and non-uniqueness due to a transformation/reduction of manifold coordinates. True features should not show
significant sensitivity to sampling while non-uniqueness/folds in the manifold will.
More information can be found in :cite:`Armstrong2021`.
:param sampling_percentages:
list or 1D array of fractions (between 0 and 1) of the provided data to sample for computing the normalized variance
:param indepvars:
independent variable values (size: n_observations x n_independent variables)
:param depvars:
dependent variable values (size: n_observations x n_dependent variables)
:param depvar_names:
list of strings corresponding to the names of the dependent variables (for saving values in a dictionary)
:param n_sample_iterations:
(optional, default 1) how many iterations for each ``sampling_percentages`` to average the normalized variance derivative over
:param verbose:
(optional, default True) when True, progress statements are printed
:param npts_bandwidth:
(optional, default 25) number of points to build a logspace of bandwidth values
:param min_bandwidth:
(optional, default to minimum nonzero interpoint distance) minimum bandwidth
:param max_bandwidth:
(optional, default to estimated maximum interpoint distance) maximum bandwidth
:param bandwidth_values:
(optional) array of bandwidth values, i.e. filter widths for a Gaussian filter, to loop over
:param scale_unit_box:
(optional, default True) center/scale the independent variables between [0,1] for computing a normalized variance so the bandwidth values have the same meaning in each dimension
:param n_threads:
(optional, default None) number of threads to run this computation. If None, default behavior of multiprocessing.Pool is used, which is to use all available cores on the current system.
:return:
- a dictionary of the normalized variance derivative (:math:`\\hat{\\mathcal{D}}(\\sigma)`) for each sampling percentage in ``sampling_percentages`` averaged over ``n_sample_iterations`` iterations
- the :math:`\\sigma` values used for computing :math:`\\hat{\\mathcal{D}}(\\sigma)`
- a dictionary of the ``VarianceData`` objects for each sampling percentage and iteration in ``sampling_percentages`` and ``n_sample_iterations``
"""
assert indepvars.ndim == 2, "independent variable array must be 2D: n_observations x n_variables."
assert depvars.ndim == 2, "dependent variable array must be 2D: n_observations x n_variables."
if isinstance(sampling_percentages, list):
for p in sampling_percentages:
assert p > 0., "sampling percentages must be between 0 and 1"
assert p <= 1., "sampling percentages must be between 0 and 1"
elif isinstance(sampling_percentages, np.ndarray):
assert sampling_percentages.ndim ==1, "sampling_percentages must be given as a list or 1D array"
for p in sampling_percentages:
assert p > 0., "sampling percentages must be between 0 and 1"
assert p <= 1., "sampling percentages must be between 0 and 1"
else:
raise ValueError("sampling_percentages must be given as a list or 1D array.")
normvar_data = {}
avg_der_data = {}
for p in sampling_percentages:
if verbose:
print('sampling', p * 100., '% of the data')
nv_data = {}
avg_der = {}
for it in range(n_sample_iterations):
if verbose:
print(' iteration', it + 1, 'of', n_sample_iterations)
rnd.seed(it)
idxsample = rnd.sample(list(np.arange(0, indepvars.shape[0])), int(p * indepvars.shape[0]))
nv_data[it] = compute_normalized_variance(indepvars[idxsample, :], depvars[idxsample, :], depvar_names,
npts_bandwidth=npts_bandwidth, min_bandwidth=min_bandwidth,
max_bandwidth=max_bandwidth, bandwidth_values=bandwidth_values,
scale_unit_box=scale_unit_box, n_threads=n_threads)
der, xder, _ = normalized_variance_derivative(nv_data[it])
for key in der.keys():
                if it == 0:
                    avg_der[key] = der[key] / float(n_sample_iterations)
                else:
                    avg_der[key] += der[key] / float(n_sample_iterations)
avg_der_data[p] = avg_der
normvar_data[p] = nv_data
return avg_der_data, xder, normvar_data
|
a340659a363760c01d38dd21b2a439b346751a23
| 31,136 |
def classify_data(X_train, Y_train, X_test):
"""Develop and train your very own variational quantum classifier.
Use the provided training data to train your classifier. The code you write
for this challenge should be completely contained within this function
between the # QHACK # comment markers. The number of qubits, choice of
variational ansatz, cost function, and optimization method are all to be
developed by you in this function.
Args:
X_train (np.ndarray): An array of floats of size (250, 3) to be used as training data.
Y_train (np.ndarray): An array of size (250,) which are the categorical labels
associated to the training data. The categories are labeled by -1, 0, and 1.
X_test (np.ndarray): An array of floats of (50, 3) to serve as testing data.
Returns:
str: The predicted categories of X_test, converted from a list of ints to a
comma-separated string.
"""
# Use this array to make a prediction for the labels of the data in X_test
predictions = []
# QHACK #
# Define output labels as quantum state vectors
def density_matrix(state):
"""Calculates the density matrix representation of a state.
Args:
state (array[complex]): array representing a quantum state vector
Returns:
dm: (array[complex]): array representing the density matrix
"""
return state * np.conj(state).T
label_0 = [[1], [0]]
label_1 = [[0], [1]]
state_labels = [label_0, label_1]
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev)
def qcircuit(params, x, y):
"""A variational quantum circuit representing the Universal classifier.
Args:
params (array[float]): array of parameters
x (array[float]): single input vector
y (array[float]): single output state density matrix
Returns:
float: fidelity between output state and input
"""
for p in params:
qml.Rot(*x, wires=0)
qml.Rot(*p, wires=0)
return qml.expval(qml.Hermitian(y, wires=[0]))
def cost(params, x, y, state_labels=None):
"""Cost function to be minimized.
Args:
params (array[float]): array of parameters
x (array[float]): 2-d array of input vectors
y (array[float]): 1-d array of targets
state_labels (array[float]): array of state representations for labels
Returns:
float: loss value to be minimized
"""
# Compute prediction for each input in data batch
loss = 0.0
dm_labels = [density_matrix(s) for s in state_labels]
for i in range(len(x)):
f = qcircuit(params, x[i], dm_labels[y[i]])
loss = loss + (1 - f) ** 2
return loss / len(x)
def clf_predict(params, x, state_labels=None):
"""
Tests on a given set of data.
Args:
params (array[float]): array of parameters
x (array[float]): 2-d array of input vectors
state_labels (array[float]): 1-d array of state representations for labels
Returns:
predicted (array([int]): predicted labels for test data
"""
dm_labels = [density_matrix(s) for s in state_labels]
predicted = []
for i in range(len(x)):
fidel_function = lambda y: qcircuit(params, x[i], y)
fidelities = [fidel_function(dm) for dm in dm_labels]
best_fidel = np.argmax(fidelities)
predicted.append(best_fidel)
return np.array(predicted)
def accuracy_score(y_true, y_pred):
"""Accuracy score.
Args:
y_true (array[float]): 1-d array of targets
y_predicted (array[float]): 1-d array of predictions
state_labels (array[float]): 1-d array of state representations for labels
Returns:
score (float): the fraction of correctly classified samples
"""
score = y_true == y_pred
return score.sum() / len(y_true)
def iterate_minibatches(inputs, targets, batch_size):
"""
A generator for batches of the input data
Args:
inputs (array[float]): input data
targets (array[float]): targets
Returns:
inputs (array[float]): one batch of input data of length `batch_size`
targets (array[float]): one batch of targets of length `batch_size`
"""
for start_idx in range(0, inputs.shape[0] - batch_size + 1, batch_size):
idxs = slice(start_idx, start_idx + batch_size)
yield inputs[idxs], targets[idxs]
def train(X_train, Y_train, learning_rate, X_test):
# Train using Adam optimizer and evaluate the classifier
num_layers = 3
epochs = 2
batch_size = 32
accuracy_train_best = 0
opt = qml.optimize.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999)
# initialize random weights
params = np.random.uniform(size=(num_layers, 3))
for it in range(epochs):
for Xbatch, ybatch in iterate_minibatches(X_train, Y_train, batch_size=batch_size):
params = opt.step(lambda v: cost(v, Xbatch, ybatch, state_labels), params)
predicted_train = clf_predict(params, X_train, state_labels)
accuracy_train = accuracy_score(Y_train, predicted_train)
loss = cost(params, X_train, Y_train, state_labels)
#print(accuracy_train)
if accuracy_train > accuracy_train_best:
best_params = params
accuracy_train_best = accuracy_train
if accuracy_train == 1:
break
return clf_predict(best_params, X_test, state_labels)
# label 1 vs label 0 & -1
Y_qubit_0 = np.zeros((len(Y_train),), dtype=int)
Y_qubit_0[Y_train == 1] += 1
# label -1 vs label 0 & 1
Y_qubit_1 = np.zeros((len(Y_train),), dtype=int)
Y_qubit_1[Y_train == -1] += 1
# qubit 0
Ypred_1 = train(X_train, Y_qubit_0, 0.6, X_test)
# qubit 1
Ypred_min1 = train(X_train, Y_qubit_1, 0.3, X_test)
predictions = np.zeros((len(X_test),), dtype=int)
predictions[Ypred_1 == 1] += 1
predictions[Ypred_min1 == 1] += -1
# QHACK #
return array_to_concatenated_string(predictions)
|
7af9c56490a782fb32f289042a23871bfb5f0a85
| 31,137 |
def preprocess_embedding(z):
"""Pre-process embedding.
Center and scale embedding to be between -.5, and .5.
Arguments:
z: A 2D NumPy array.
shape=(n_concept, n_dim)
Returns:
z_p: A pre-processed embedding.
"""
# Center embedding.
z_p = z - np.mean(z, axis=0, keepdims=True)
# Scale embedding.
max_val = np.max(np.abs(z_p))
z_p /= max_val
z_p /= 2
return z_p
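# Usage sketch (not part of the original snippet): after pre-processing, every
# coordinate lies in [-0.5, 0.5] and the columns are mean-centered.
import numpy as np
z = np.array([[0.0, 2.0], [4.0, 6.0], [8.0, 10.0]])
z_p = preprocess_embedding(z)
assert np.isclose(np.abs(z_p).max(), 0.5)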
|
6927e606ec61b0504e91b5b19e84a819d3e86274
| 31,138 |
def T_from_Ts(Ts,P,qt,es_formula=es_default):
""" Given theta_e solves implicitly for the temperature at some other pressure,
so that theta_e(T,P,qt) = Te
>>> T_from_Tl(282.75436951,90000,20.e-3)
290.00
"""
def zero(T,Ts,P,qt):
return np.abs(Ts-get_theta_s(T,P,qt,es_formula))
return optimize.fsolve(zero, 200., args=(Ts,P,qt), xtol=1.e-10)
|
ae2431127a3d0924da4b1e45d98c72a2c42b7a6c
| 31,140 |
from typing import Tuple
def find_global_peaks_integral(
cms: tf.Tensor, crop_size: int = 5, threshold: float = 0.2
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Find local peaks with integral refinement.
Integral regression refinement will be computed by taking the weighted average of
the local neighborhood around each rough peak.
Args:
cms: Confidence maps. Tensor of shape (samples, height, width, channels).
crop_size: Size of patches to crop around each rough peak as an integer scalar.
threshold: Minimum confidence threshold. Peaks with values below this will be
set to NaNs.
Returns:
A tuple of (peak_points, peak_vals).
peak_points: float32 tensor of shape (samples, channels, 2), where the last axis
indicates peak locations in xy order.
peak_vals: float32 tensor of shape (samples, channels) containing the values at
the peak points.
"""
return find_global_peaks(
cms, threshold=threshold, refinement="integral", integral_patch_size=crop_size
)
|
22529379fdf1e685b33319e0432a95ee51973575
| 31,142 |
def test_data(test_data_info, pytestconfig):
"""Fixture provides test data loaded from files automatically."""
data_pattern = pytestconfig.getoption(constants.OPT_DATA_PATTERN)
if test_data_info.subdirs:
return list(
list(loader.each_data_under_dir(
test_data_info.datadir / subdir, data_pattern
))
for subdir in test_data_info.subdirs
)
return list(
loader.each_data_under_dir(test_data_info.datadir, data_pattern)
)
|
639c9b16b9f6e1a4bbb5422d0ce29850535a267b
| 31,143 |
def load_maggic_data():
"""Load MAGGIC data.
Returns:
orig_data: Original data in pandas dataframe
"""
# Read csv files
file_name = 'data/Maggic.csv'
orig_data = pd.read_csv(file_name, sep=',')
# Remove NA
orig_data = orig_data.dropna(axis=0, how='any')
# Remove labels
orig_data = orig_data.drop(['death_all','days_to_fu'], axis = 1)
return orig_data
|
b4d1c219ef892c9218fd45aba22c859c96954853
| 31,144 |
def create_res_hessian_computing_tf_graph(input_shape, layer_kernel, layer_stride):
"""
This function create the TensorFlow graph for computing hessian matrix for res layer.
Step 1: It first extract image patches using tf.extract_image_patches.
Step 2: Then calculate the hessian matrix by outer product.
Args:
input_shape: the dimension of input
layer_kernel: kernel size of the layer
layer_stride: stride of the layer
Output:
input_holder: TensorFlow placeholder for layer input
get_hessian_op: A TensorFlow operator to calculate hessian matrix
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
patches = tf.extract_image_patches(images = input_holder,
ksizes = [1,layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
    print('Patches shape: %s' % patches.get_shape())
a = tf.expand_dims(patches, axis=-1)
b = tf.expand_dims(patches, axis=3)
outprod = tf.multiply(a, b)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=[0, 1, 2])
    print('Hessian shape: %s' % get_hessian_op.get_shape())
return input_holder, get_hessian_op
|
aaf5f52b32f1f8f67d54d70c10c706dc26b91a75
| 31,145 |
def loadUiType(uiFile):
"""
Pyside lacks the "loadUiType" command, so we have to convert the ui file to py code in-memory first
and then execute it in a special frame to retrieve the form_class.
http://tech-artists.org/forum/showthread.php?3035-PySide-in-Maya-2013
"""
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
pysideuic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc, frame)
#Fetch the base_class and form class based on their type in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('QtGui.%s'%widget_class)
return form_class, base_class
|
2b5bfcdefb0d1c0fb361a4d080eea0b0cc927599
| 31,146 |
def ldns_native2rdf_int8(*args):
"""LDNS buffer."""
return _ldns.ldns_native2rdf_int8(*args)
|
21e295dcd9a67018bd673dc5ff605e80707d9736
| 31,147 |
import re
def _ListProcesses(args, req_vars): # pylint: disable=W0613
"""Lists processes and their CPU / mem stats.
The response is formatted according to the Google Charts DataTable format.
"""
device = _GetDevice(args)
if not device:
return _HTTP_GONE, [], 'Device not found'
resp = {
'cols': [
{'label': 'Pid', 'type':'number'},
{'label': 'Name', 'type':'string'},
{'label': 'Cpu %', 'type':'number'},
{'label': 'Mem RSS Kb', 'type':'number'},
{'label': '# Threads', 'type':'number'},
],
'rows': []}
for process in device.ListProcesses():
# Exclude system apps if the request didn't contain the ?all=1 arg.
if not req_vars.get('all') and not re.match(_APP_PROCESS_RE, process.name):
continue
stats = process.GetStats()
resp['rows'] += [{'c': [
{'v': process.pid, 'f': None},
{'v': process.name, 'f': None},
{'v': stats.cpu_usage, 'f': None},
{'v': stats.vm_rss, 'f': None},
{'v': stats.threads, 'f': None},
]}]
return _HTTP_OK, [], resp
|
636f91e3dcf6b236081787096c12eadf956c9024
| 31,148 |
def _runForRunnerUser(resourceDB: ResourceDB, user: User) -> TaskRun:
"""Returns the task run accessible to the given user.
Raises AccessDenied if the user does not represent a Task Runner
or the Task Runner is not running any task.
"""
# Get Task Runner.
if not isinstance(user, TokenUser):
raise AccessDenied('This operation is exclusive to Task Runners')
try:
runner = resourceDB.runnerFromToken(user)
except KeyError as ex:
raise AccessDenied(*ex.args) from ex
# Get active task from Task Runner.
run = runner.getRun()
if run is None:
raise AccessDenied('Idle Task Runner cannot access jobs')
else:
return run
|
e3b89ed95bea8c50be885ef56578e23919a7a25c
| 31,149 |
def narrow(lon='auto',lat='auto',ax=None,lfactor=1,**kwargs):
"""
Plot north arrow.
Parameters:
lon: Starting longitude (decimal degrees) for arrow
    lat: Starting latitude (decimal degrees) for arrow
ax: Axes on which to plot arrow
lfactor: Length factor to increase/decrease arrow length
Returns:
ax: Axes with arrow plotted
"""
if ax is None:
ax = plt.gca()
geodesic = cgeo.Geodesic() # Set up geodesic calculations
# Get map projection from axes
crs = ax.projection
if (lon=='auto')&(lat=='auto'):
trans = ax.transAxes + ax.transData.inverted()
sx,sy = trans.transform((0.2,0.2))
lon,lat = ccrs.Geodetic().transform_point(sx,sy,src_crs=crs)
# Get geodetic projection for lat/lon - do not confuse with geodesic
gdet = ccrs.Geodetic()
# Get axes extent and convert to lat/lon
x1,x2,y1,y2 = ax.get_extent()
tlx,tly = gdet.transform_point(x1,y2,src_crs=crs)
blx,bly = gdet.transform_point(x1,y1,src_crs=crs)
diff = abs(bly-tly) # Get x coverage of plot in decimal degrees
# Get arrow endpoint scaled by diff and lfactor
end = geodesic.direct(
points=[lon,lat],azimuths=0,distances=lfactor*diff*2*10**4)[0]
# Transform lat-lon into axes coordinates
xstart,ystart = crs.transform_point(lon,lat,src_crs=ccrs.Geodetic())
# Get X-Y coordinates of endpoint
xend,yend = crs.transform_point(end[0],end[1],src_crs=ccrs.Geodetic())
# Plot arrow as annotation
ax.annotate("",xy=(xstart,ystart),xycoords='data',xytext=(xend,yend),
textcoords='data',arrowprops=dict(arrowstyle="<|-",
connectionstyle="arc3"))
# Add N to arrow
ax.text(xend,yend,'N',fontsize=7,ha='center')
return(ax)
|
8d3162ab05366472b607ff953811caf2796c1e3e
| 31,150 |
from typing import List
from pathlib import Path
def dump_content_descriptor(artifact_manager: ArtifactsManager) -> ArtifactsReport:
""" Dumping content/content_descriptor.json into:
1. content_test
2. content_new
3. content_all
Args:
artifact_manager: Artifacts manager object.
Returns:
ArtifactsReport: ArtifactsReport object.
Notes:
1. content_descriptor.json created during build run time.
"""
report = ArtifactsReport("Content descriptor:")
if not artifact_manager.only_content_packs and artifact_manager.content.content_descriptor:
descriptor = artifact_manager.content.content_descriptor
object_report = ObjectReport(descriptor, content_test=True, content_new=True, content_all=True)
created_files: List[Path] = []
for dest in [artifact_manager.content_test_path,
artifact_manager.content_new_path,
artifact_manager.content_all_path]:
created_files = dump_link_files(artifact_manager, descriptor, dest, created_files)
report.append(object_report)
return report
|
e39bc38c72cd3f7b555d410d037ef1df3c747933
| 31,151 |
def parse_movie(line, sep='::'):
"""
Parses a movie line
Returns: tuple of (movie_id, title)
"""
fields = line.strip().split(sep)
movie_id = int(fields[0]) # convert movie_id to int
title = fields[1]
return movie_id, title
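# Usage sketch (not part of the original snippet), using the MovieLens '::' format:
parse_movie("1::Toy Story (1995)::Animation")   # -> (1, 'Toy Story (1995)')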
|
9d7a13ca3ddf823ff22582f648434d4b6df00207
| 31,152 |
def make_plot(dataset, plot_label, xlabel, ylabel, legend):
"""
Generates a MatPlotLib plot from the specified dataset and with the specified labeling features.
make_plot(dataset, plot_label, xlabel, ylabel, legend) -> plt
@type dataset: list of list
@param dataset: formatted dataset.
@type plot_label: string
@param plot_label: plot title.
@type xlabel: string
@param xlabel: x-axis plot label.
@type ylabel: string
@param ylabel: y-axis plot label
@type legend: tuple
@param legend: plot legend.
@rtype: MatPlotLib plot
@return: plot
"""
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.6, 0.75])
plt.title(plot_label)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
for data, lbl in zip(dataset, legend):
ax.plot(data[0], data[1], label = lbl)
ax.legend(bbox_to_anchor = (1.05, 1), loc = 2, borderaxespad = 0.)
return plt
|
0e5ff583c65dcd3751354a416d9940a12871e37d
| 31,153 |
import typing as typ
def flatten_list(in_list: typ.List) -> typ.List:
"""Flattens list"""
result = []
for item in in_list:
if isinstance(item, list):
result.extend(flatten_list(item))
else:
result.append(item)
return result
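# Usage sketch (not part of the original snippet): arbitrarily nested lists are
# flattened recursively.
flatten_list([1, [2, [3, 4]], 5])   # -> [1, 2, 3, 4, 5]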
|
9d847fea7f1eb30ecbd51dd1d001c8661a502a0d
| 31,154 |
import logging
def formatMessage(data):
"""
Format incoming message before passing to Discord
"""
logging.info("Formatting message payload...")
time = (data.occurredAt).split("T")
message = [":alarm_clock: __**Meraki Alert**__ :alarm_clock: "]
message.append(f"**Device:** {data.deviceName}")
message.append(f"**Message info:** {data.alertType}")
message.append(f"**Occurred at:** {time[0]} - {time[1][:8]}")
if len(data.alertData) > 0:
message.append(f"**Additional data:** ```fix\r\n{data.alertData}\r\n```")
sendmessage = ""
for each in message:
sendmessage += each + "\r\n"
return sendmessage
|
21d7a50951aeecb6917479622f4131b7ddcfda00
| 31,155 |
def _convert_nearest_neighbors(operator, container, k=None, radius=None):
"""
Common parts to regressor and classifier. Let's denote
*N* as the number of observations, *k*
the number of neighbours. It returns
the following intermediate results:
top_indices: [N, k] (int64), best indices for
every observation
top_distances: [N, k] (dtype), float distances
for every observation, it can be None
if the weights are uniform
top_labels: [N, k] (label type), labels
associated to every top index
weights: [N, k] (dtype), if top_distances is not None,
returns weights
norm: [N] (dtype), if top_distances is not None,
returns normalized weights
axis: 1 if there is one dimension only, 2 if
this is a multi-regression or a multi classification
"""
X = operator.inputs[0]
op = operator.raw_operator
opv = container.target_opset
dtype = guess_numpy_type(X.type)
if dtype != np.float64:
dtype = np.float32
proto_type = guess_proto_type(X.type)
if proto_type != onnx_proto.TensorProto.DOUBLE:
proto_type = onnx_proto.TensorProto.FLOAT
if isinstance(X.type, Int64TensorType):
X = OnnxCast(X, to=proto_type, op_version=opv)
options = container.get_options(op, dict(optim=None))
single_reg = (not hasattr(op, '_y') or len(op._y.shape) == 1 or
len(op._y.shape) == 2 and op._y.shape[1] == 1)
ndim = 1 if single_reg else op._y.shape[1]
metric = (op.effective_metric_ if hasattr(op, 'effective_metric_') else
op.metric)
neighb = op._fit_X.astype(dtype)
if (hasattr(op, 'n_neighbors') and op.n_neighbors is not None and
hasattr(op, 'radius') and op.radius is not None):
raise RuntimeError(
"The model defines radius and n_neighbors at the "
"same time ({} and {}). "
"This case is not supported.".format(
op.radius, op.n_neighbors))
if hasattr(op, 'n_neighbors') and op.n_neighbors is not None:
k = op.n_neighbors if k is None else k
radius = None
elif hasattr(op, 'radius') and op.radius is not None:
k = None
radius = op.radius if radius is None else radius
else:
raise RuntimeError(
"Cannot convert class '{}'.".format(op.__class__.__name__))
training_labels = op._y if hasattr(op, '_y') else None
distance_kwargs = {}
if metric == 'minkowski':
if op.p != 2:
distance_kwargs['p'] = op.p
else:
metric = "euclidean"
weights = op.weights if hasattr(op, 'weights') else 'distance'
binary = None
if weights == 'uniform' and radius is None:
top_indices = onnx_nearest_neighbors_indices_k(
X, neighb, k, metric=metric, dtype=dtype,
op_version=opv, optim=options.get('optim', None),
**distance_kwargs)
top_distances = None
elif radius is not None:
three = onnx_nearest_neighbors_indices_radius(
X, neighb, radius, metric=metric, dtype=dtype,
op_version=opv, keep_distances=True,
proto_dtype=proto_type,
optim=options.get('optim', None),
**distance_kwargs)
top_indices, top_distances, binary = three
elif weights == 'distance':
top_indices, top_distances = onnx_nearest_neighbors_indices_k(
X, neighb, k, metric=metric, dtype=dtype,
op_version=opv, keep_distances=True,
optim=options.get('optim', None),
**distance_kwargs)
else:
raise RuntimeError(
"Unable to convert KNeighborsRegressor when weights is callable.")
if training_labels is not None:
if ndim > 1:
training_labels = training_labels.T
axis = 2
else:
training_labels = training_labels.ravel()
axis = 1
if opv >= 9:
kor = k if k is not None else training_labels.shape[-1]
if ndim > 1:
shape = np.array([ndim, -1, kor], dtype=np.int64)
else:
shape = np.array([-1, kor], dtype=np.int64)
else:
raise RuntimeError(
"Conversion of a KNeighborsRegressor for multi regression "
"requires opset >= 9.")
if training_labels.dtype == np.int32:
training_labels = training_labels.astype(np.int64)
flattened = OnnxFlatten(top_indices, op_version=opv)
extracted = OnnxArrayFeatureExtractor(
training_labels, flattened, op_version=opv)
reshaped = OnnxReshape(extracted, shape, op_version=opv)
if ndim > 1:
reshaped = OnnxTranspose(reshaped, op_version=opv, perm=[1, 0, 2])
reshaped.set_onnx_name_prefix('knny')
else:
reshaped = None
axis = 1
if binary is not None:
if op.weights == 'uniform':
wei = binary
else:
modified = OnnxMax(top_distances, np.array([1e-6], dtype=dtype),
op_version=opv)
wei = OnnxMul(binary, OnnxReciprocal(modified, op_version=opv),
op_version=opv)
norm = OnnxReduceSum(wei, op_version=opv, axes=[1], keepdims=0)
elif top_distances is not None:
modified = OnnxMax(top_distances, np.array([1e-6], dtype=dtype),
op_version=opv)
wei = OnnxReciprocal(modified, op_version=opv)
norm = OnnxReduceSum(wei, op_version=opv, axes=[1], keepdims=0)
else:
norm = None
wei = None
if wei is not None:
wei.set_onnx_name_prefix('wei')
if norm is not None:
norm.set_onnx_name_prefix('norm')
return top_indices, top_distances, reshaped, wei, norm, axis
|
c328d2f78467df05ddcfd83186e0704e234eabd8
| 31,156 |
import click
def interface_alias_to_name(config_db, interface_alias):
"""Return default interface name if alias name is given as argument
"""
vlan_id = ""
sub_intf_sep_idx = -1
if interface_alias is not None:
sub_intf_sep_idx = interface_alias.find(VLAN_SUB_INTERFACE_SEPARATOR)
if sub_intf_sep_idx != -1:
vlan_id = interface_alias[sub_intf_sep_idx + 1:]
# interface_alias holds the parent port name so the subsequent logic still applies
interface_alias = interface_alias[:sub_intf_sep_idx]
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_alias)
if namespace is None:
return None
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
if interface_alias is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict:
if interface_alias == port_dict[port_name]['alias']:
return port_name if sub_intf_sep_idx == -1 else port_name + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
# Interface alias not in port_dict, just return interface_alias, e.g.,
# portchannel is passed in as argument, which does not have an alias
return interface_alias if sub_intf_sep_idx == -1 else interface_alias + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
|
6cac69be850a65dda505999616e4d21d0a8c9438
| 31,157 |
import time
def twait(phrase, tn, tout=-1, logging='off', rcontent=False, screenprint=False):
""" telnetlib wait with optional timeout and optional logging"""
# Adding code to allow lists for phrase
finalcontent = ' '
#This is the time of the epoch
startTime = int(time.time())
while True:
# This is the current time
currentTime = int(time.time())
if tout != -1:
# This is the time since the start of this loop
# if it exceeds the timeout value passed to it
# then exit with a return of 0
if (currentTime - startTime) > tout:
if logging == 'on':
#Adding the -e-e-> to differentiate from device output
if screenprint:
print('-e-e->It has been ' + str(tout) + ' seconds. Timeout!')
if not rcontent:
return 0
return 0, finalcontent
# Eager reading back from the device
content = (tn.read_very_eager().decode().strip())
if content.strip() != '':
finalcontent += content
# if the returned content isn't blank. This stops
# it from spamming new line characters
if content.strip() != '':
if screenprint:
print(content, end='')
# content was found! Return a 1 for success
if isinstance(phrase, str):
if phrase in content:
if not rcontent:
return 1
return 1, finalcontent
if isinstance(phrase, list):
count = 1
for p in phrase:
if p in content:
if not rcontent:
return count
return count, finalcontent
count += 1
|
9c4308e873321fd556d8eec2668981fc2843ae87
| 31,158 |
def process_ob(obs : dict) -> dict:
""" Processes an individual post
:param obs: video observation metadata
:returns: processed post metadata based on tags in posts_titles
"""
metadata = {tag: obs[tag] for tag in obs_tags}
metadata.update(process_obs(obs["children"], obs["history"]))
metadata.update({"uid+date": metadata["student uid"] + metadata["section date"]})
# for k,v in metadata.items():
# print(k, v)
folders = metadata["folders"]
if "peer_observation" in folders or "video observation" in metadata["title"].lower():
if "logistics" not in folders and "self_reflection_makeup" not in folders :
return metadata
raise Exception(metadata["title"], " is in folders ", metadata["folders"], " and was not included in csv")
|
e5200443d6ba6e69d49fb56d377f4a9992f157bb
| 31,159 |
from beta import calc_beta_eta
def calc_detectable_frac(gen, model, args, gen2=None, swap_h0_and_h1=False, verbose=0):
"""
:param gen: sample generator
:param model: NN model
:param args: parameters object (should contain alpha and beta attributes)
:param gen2: if gen2 is not None gen output is used for 0-hypothesis and gen2 for alternative
otherwise frac > 0 condition is used
:return: (frac, alpha) minimal fraction of source events in alternative (gen2) hypothesis and precise alpha or (1., 1.) if detection is impossible
"""
if swap_h0_and_h1:
_alpha = args.beta
_beta = args.alpha
else:
_alpha = args.alpha
_beta = args.beta
    # use the (possibly swapped) error levels defined above
    fracs, beta, th_eta = calc_beta_eta(gen, model, _alpha, gen2=gen2, beta_threshold=_beta, verbose=verbose)
    return th_eta, _alpha
|
6844abc148cee69453eb3136e018a40eccb40334
| 31,160 |
import numpy as np
from typing import Tuple
def similarity_std(
buffer: NumpyNDArray, wav_file: WavFile, rate: int, threshold: float
) -> Tuple[bool, float, float]:
"""Check the similarity of a recorded sound buffer and a given wav_file.
Use a correlation check using the standard deviation."""
_, _, current_fft = perform_fft(buffer[:wav_file.signal_length], rate, True)
corrcoef = np.correlate(
wav_file.signal_fft / wav_file.signal_fft.std(),
current_fft / current_fft.std()
)[0] / len(wav_file.signal_fft)
return corrcoef >= threshold, corrcoef, threshold
|
8900495860f59b39be3f5f3808bb321e0275bc81
| 31,161 |
def grdtrack(points, grid, newcolname=None, outfile=None, **kwargs):
"""
Sample grids at specified (x,y) locations.
Grdtrack reads one or more grid files and a table with (x,y) [or (lon,lat)]
positions in the first two columns (more columns may be present). It
interpolates the grid(s) at the positions in the table and writes out the
table with the interpolated values added as (one or more) new columns. A
bicubic [Default], bilinear, B-spline or nearest-neighbor interpolation is
used, requiring boundary conditions at the limits of the region (see
*interpolation*; Default uses “natural” conditions (second partial
derivative normal to edge is zero) unless the grid is automatically
recognized as periodic.)
Full option list at :gmt-docs:`grdtrack.html`
{aliases}
Parameters
----------
points : pandas.DataFrame or str
Either a table with (x, y) or (lon, lat) values in the first two
columns, or a filename (e.g. csv, txt format). More columns may be
present.
grid : xarray.DataArray or str
Gridded array from which to sample values from, or a filename (netcdf
format).
newcolname : str
Required if 'points' is a pandas.DataFrame. The name for the new column
in the track pandas.DataFrame table where the sampled values will be
placed.
outfile : str
Required if 'points' is a file. The file name for the output ASCII
file.
{V}
{n}
Returns
-------
track: pandas.DataFrame or None
Return type depends on whether the outfile parameter is set:
- pandas.DataFrame table with (x, y, ..., newcolname) if outfile is not
set
- None if outfile is set (track output will be stored in outfile)
"""
with GMTTempFile(suffix=".csv") as tmpfile:
with Session() as lib:
# Store the pandas.DataFrame points table in virtualfile
if data_kind(points) == "matrix":
if newcolname is None:
raise GMTInvalidInput("Please pass in a str to 'newcolname'")
table_context = lib.virtualfile_from_matrix(points.values)
elif data_kind(points) == "file":
if outfile is None:
raise GMTInvalidInput("Please pass in a str to 'outfile'")
table_context = dummy_context(points)
else:
raise GMTInvalidInput(f"Unrecognized data type {type(points)}")
# Store the xarray.DataArray grid in virtualfile
if data_kind(grid) == "grid":
grid_context = lib.virtualfile_from_grid(grid)
elif data_kind(grid) == "file":
grid_context = dummy_context(grid)
else:
raise GMTInvalidInput(f"Unrecognized data type {type(grid)}")
# Run grdtrack on the temporary (csv) points table
# and (netcdf) grid virtualfile
with table_context as csvfile:
with grid_context as grdfile:
kwargs.update({"G": grdfile})
if outfile is None: # Output to tmpfile if outfile is not set
outfile = tmpfile.name
arg_str = " ".join(
[csvfile, build_arg_string(kwargs), "->" + outfile]
)
lib.call_module(module="grdtrack", args=arg_str)
# Read temporary csv output to a pandas table
if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame
column_names = points.columns.to_list() + [newcolname]
result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
elif outfile != tmpfile.name: # return None if outfile set, output in outfile
result = None
return result
|
ed6a09c4f925321520c99e2a941602e82852bec3
| 31,162 |
def add(x, y):
"""add two number"""
return x+y
|
9c5fe5867e0c345bd3e7439b8fc76c21b20b2c35
| 31,163 |
def is_exponential(character: str) -> bool:
"""
Whether an IPA character is written above the base line
and to the right of the previous character,
like how exponents of a power are written
in mathematical notation.
"""
return character in exponentials
|
0d263a0969454ad9d8e7a3a53e393b55b5c8d45c
| 31,164 |
def _union_items(baselist, comparelist):
"""Combine two lists, removing duplicates."""
return list(set(baselist) | set(comparelist))
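# Usage sketch (not part of the original snippet). Because sets are used
# internally, duplicates are dropped and the order of the result is not guaranteed.
sorted(_union_items([1, 2, 2, 3], [3, 4]))   # -> [1, 2, 3, 4]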
|
782f325960db2482afc75e63dbc8a51fea24c8d0
| 31,165 |
def broadcast_state(state=current_state, ip=ip_osc, port=port_client):
"""
Broadcasts state
"""
print("Called Broadcast State Function")
#client = udp_client.UDPClient(ip, port,1)
#builder = osc_message_builder.OscMessageBuilder(address='/status')
#for k,v in state.items():
# builder.add_arg(v)
#client.send(builder.build())
#print("sent {0} to {1}:{2}".format(builder.args, ip, port))
return None
|
be8b391cbd033b7271796ec47529b8ca78f85bd5
| 31,167 |
def make_gaussian(shape, var):
"""returns 2d gaussian of given shape and variance"""
h,w = shape
x = np.arange(w, dtype=float)
y = np.arange(h, dtype=float)[:,np.newaxis]
x0 = w // 2
y0 = h // 2
mat = np.exp(-0.5 * (pow(x-x0, 2) + pow(y-y0, 2)) / var)
normalized_img = np.zeros((h, w))
cv2.normalize(mat, normalized_img, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
return normalized_img
|
88500f08c43b446ea073fef322f2007ea4032875
| 31,168 |
def analysis_iou_on_miyazakidata(result_path, pattern="GT_o"):
"""对最终4张图片统合的结果进行测试"""
ious = []
pattern1, pattern2 = pattern.strip().split("_")
pattern1 = pattern_dict[pattern1]
pattern2 = pattern_dict[pattern2]
for i in range(1, 21):
image1 = get_image(result_path, i, pattern1)
image2 = get_image(result_path, i, pattern2)
# handled_gt_path = os.path.join(result_path, "{}_mask.png".format(i))
# gt_path = os.path.join(result_path, "{}_M.png".format(i))
# image_path = os.path.join(result_path, "{}_testT.png".format(i))
# gt_path = np.array(Image.open(gt_path))
# image_path = np.array(Image.open(image_path))
ious.append(calcu_iou(image1, image2))
print(ious)
print("average of iou is {:.4f}".format(np.mean(ious)))
return ious
|
5099cde71284c0badae64e701a2362b30f87ad4c
| 31,170 |
def min_max_rdist(node_data, x: FloatArray):
"""Compute the minimum and maximum distance between a point and a node"""
lower_bounds = node_data[0]
upper_bounds = node_data[1]
min_dist = 0.0
max_dist = 0.0
for j in range(x.size):
d_lo = lower_bounds[j] - x[j]
d_hi = x[j] - upper_bounds[j]
# d = ((d_lo + abs(d_lo)) + (d_hi + abs(d_hi))) # twice as big as actual
d = max(d_lo, 0) + max(d_hi, 0)
min_dist += d * d
d = max(abs(d_lo), abs(d_hi))
max_dist += d * d
# min_dist *= 0.25
return min_dist, max_dist
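# Usage sketch (not part of the original snippet): both returned values are
# *squared* distances between the point and the node's axis-aligned box.
import numpy as np
node = (np.array([0.0, 0.0]), np.array([1.0, 1.0]))   # (lower_bounds, upper_bounds)
min_sq, max_sq = min_max_rdist(node, np.array([2.0, 0.5]))
# min_sq == 1.0  (closest box point is (1.0, 0.5))
# max_sq == 4.25 (farthest corner is (0.0, 0.0) or (0.0, 1.0))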
|
03f80210ada3530c692ce24db3bb92f0156a2d6f
| 31,171 |
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
        if s.lstrip(' \t').startswith('\r\n'):
            s = s.lstrip(' \t')[2:]
        elif s.lstrip(' \t').startswith('\n'):
            s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
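# Usage sketch (not part of the original snippet):
strip_newlines("\nhello\n\n", nleading=1, ntrailing=1)   # -> 'hello\n'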
|
cd9c55d4ac7828d9506567d879277a463d896c46
| 31,172 |
def xyz_to_lab(x_val, y_val, z_val):
"""
Convert XYZ color to CIE-Lab color.
:arg float x_val: XYZ value of X.
:arg float y_val: XYZ value of Y.
:arg float z_val: XYZ value of Z.
:returns: Tuple (L, a, b) representing CIE-Lab color
:rtype: tuple
D65/2° standard illuminant
"""
xyz = []
for val, ref in (x_val, 95.047), (y_val, 100.0), (z_val, 108.883):
val /= ref
val = pow(val, 1 / 3.0) if val > 0.008856 else 7.787 * val + 16 / 116.0
xyz.append(val)
x_val, y_val, z_val = xyz # pylint: disable=unbalanced-tuple-unpacking
cie_l = 116 * y_val - 16
cie_a = 500 * (x_val - y_val)
cie_b = 200 * (y_val - z_val)
return cie_l, cie_a, cie_b
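# Usage sketch (not part of the original snippet): the D65/2° reference white
# (X=95.047, Y=100.0, Z=108.883) maps to L*=100, a*=0, b*=0.
xyz_to_lab(95.047, 100.0, 108.883)   # -> (100.0, 0.0, 0.0)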
|
c2478772659a5d925c4db0b6ba68ce98b6537a59
| 31,173 |
def get_core_v1_api():
"""
:return: api client
"""
config.load_kube_config()
core_api = client.CoreV1Api()
return core_api
|
2169b3dfe7a603c1d870c0100d96640f4688023c
| 31,174 |
def ssbm(n: int, k: int, p: float, q: float, directed=False):
"""
Generate a graph from the symmetric stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by p. The probability of an edge between two different clusters is q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param p: The probability of an edge inside a cluster.
:param q: The probability of an edge between clusters.
:param directed: Whether to generate a directed graph.
:return: The generated graph as an ``sgtl.Graph`` object.
"""
# Make sure that the value q is an integer or float
try:
p = float(p)
q = float(q)
except Exception:
raise TypeError("The probabilities p and q must be numbers between 0 and 1.")
# Every cluster has the same size.
cluster_sizes = [int(n/k)] * k
# Construct the k*k probability matrix Q. The off-diagonal entries are all q and the diagonal entries are all p.
prob_mat_q = []
for row_num in range(k):
new_row = [q] * k
new_row[row_num] = p
prob_mat_q.append(new_row)
# Call the general sbm method.
return sbm(cluster_sizes, prob_mat_q, directed=directed)
|
f849bb41afe617e6d66e043280b4606e1d5390c4
| 31,175 |
def std_normal_dist_cumulative(lower, upper):
"""
Find the area under the standard normal distribution between
the lower and upper bounds.
Bounds that aren't specified are taken at infinity.
"""
return find_distribution_area(stats.norm, lower, upper)
|
210a16d29d8e07949dbe67c7486e9fea1656ce76
| 31,177 |
def ann_pharm_variant(dataframe, genome, knowledgebase, variant_type):
"""query with chr, start, stop, ref, alt, genome assembly version. Returns all the drugs
targeting the observed variant. """
all_direct_targets = {}
rows = get_variant_list(dataframe)
for r in rows:
direct_target_list = []
gene = r[0]
if variant_type == "MUT":
search_subset = {"chromosome": r[1], "start": r[2], "stop": r[3],
"reference_base": r[4], "alteration_base": r[5], "assembly_version": genome}
elif variant_type == "CNA":
search_subset = {"chromosome": r[1], "start": r[2], "stop": r[3],
"global_type": r[4], "assembly_version": genome}
superset = get_inner_dict(gene, "variant_annotation", knowledgebase)
if not superset:
continue
for variant in superset:
v_index = superset.index(variant)
search_set = superset[v_index]
coverage = all(item in search_set.items()
for item in search_subset.items())
if coverage:
direct_target_list.append(search_set)
if not direct_target_list:
continue
if gene in all_direct_targets:
all_direct_targets[gene].extend(direct_target_list)
else:
all_direct_targets[gene] = direct_target_list
return all_direct_targets
|
cf1b8be54bf5ccb1bfa29afbe2af5020246ac7ea
| 31,178 |
from typing import List
from typing import Tuple
def to_pier_settlement(
config: Config,
points: List[Point],
responses_array: List[List[float]],
response_type: ResponseType,
pier_settlement: List[Tuple[PierSettlement, PierSettlement]],
) -> List[List[float]]:
"""Time series of responses to pier settlement.
Args:
config: simulation configuration object.
points: points in the TrafficArray.
responses_array: NumPY array indexed first by point then time.
response_type: the sensor response type to add.
pier_settlement: start and end settlements of piers.
Returns: NumPY array of same shape as "responses_array" and considering the
same points, but only containing the responses from pier settlement.
"""
if len(pier_settlement) == 0:
return np.zeros(responses_array.shape)
assert len(responses_array) == len(points)
for ps in pier_settlement:
assert ps[0].pier == ps[1].pier
# Sorted by pier settlement index then by point index.
unit_responses = [
load(
config=config,
response_type=response_type,
pier_settlement=[
PierSettlement(pier=ps[0].pier, settlement=config.unit_pier_settlement)
],
).at_decks(points)
for ps in pier_settlement
]
assert len(unit_responses) == len(pier_settlement)
assert len(unit_responses[0]) == len(points)
start_responses, end_responses = [0 for _ in points], [0 for _ in points]
for p_i, _ in enumerate(points):
for ps_i, ps in enumerate(pier_settlement):
start_responses[p_i] += unit_responses[ps_i][p_i] * (
ps[0].settlement / config.unit_pier_settlement
)
end_responses[p_i] += unit_responses[ps_i][p_i] * (
ps[1].settlement / config.unit_pier_settlement
)
ps_responses = np.zeros(responses_array.shape)
for p, _ in enumerate(points):
ps_responses[p] = np.interp(
np.arange(ps_responses.shape[1]),
[0, ps_responses.shape[1] - 1],
[start_responses[p], end_responses[p]],
)
return ps_responses
|
48879533d392f89cb39e7bea4b6565b8ff172279
| 31,179 |
def subtract_something(a, b):
"""
    Subtracts one value from another.
    a: subtract-able
    First thing
    b: subtract-able
    Second thing.
    Returns:
    --------
    Result of subtracting b from a.
"""
return a - b
|
e090d09fc3fd35b080b12584d663519b39ed24c9
| 31,180 |
from pathlib import Path
from warnings import warn
def last_rådata(datafil, track=None):
    """Load raw data from a gpx file or a GPSLogger csv file.
    """
    datafil = Path(datafil)
    if datafil.suffix == '.gpx':
        sjekk_at_gpxpy_er_installert()
        return last_rådata_gpx(datafil, track=track)
    elif datafil.suffix == '.csv':
        if track is not None:
            warn("track can only be specified when using gpx files.")
        return last_rådata_gpslogger(datafil)
    else:
        raise ValueError("The file type must be either csv (for GPSLogger csv files) or gpx.")
|
c0e7d638c858ef97cde7c8c0ce2ea9796ff4c7de
| 31,181 |
from datetime import datetime
def getAverageStay(date1, date2, hop):
"""
    The average number of days a patient stays at a hospital (from admission to discharge).
    If hop is None, the statistic is calculated system-wide.
"""
count = 0
num = 0
for visit in Appointment.objects.all():
doc = Doctor.objects.get(id=visit.doctorID.id)
if doc.hospitalID == hop or hop == None:
            start = datetime.date(year=date1.year, month=date1.month, day=date1.day)
            end = datetime.date(year=date2.year, month=date2.month, day=date2.day)
            if start <= visit.aptDate <= end:
if ExtendedStay.objects.filter(appointmentID=visit.id).exists():
stay = ExtendedStay.objects.get(appointmentID=visit.id)
count += (stay.endDate - visit.aptDate).days
num += 1
if num == 0:
return 0
else:
return count / num
|
9044d415d6231e51ee4eecb5e00e3a531fb209fb
| 31,182 |
def classify(inputTree,featLabels,testVec):
"""
    Decision tree classification: walk the tree and return the class label for testVec.
    """
    firstSides = list(inputTree.keys())
    firstStr = firstSides[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = None
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                classLabel = secondDict[key]
    return classLabel
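# Example (a sketch; the tree and labels below are hypothetical):
#   tree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
#   classify(tree, ['no surfacing', 'flippers'], [1, 1])   # -> 'yes'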
|
3daee2b72332c14eab41fd00bf781f00b4e1e633
| 31,183 |
def update(sql, *args):
"""
    Execute an UPDATE statement and return the number of rows updated.
:param sql:
:param args:
:return:
"""
return _update(sql, *args)
|
700a155dbdcc77d824ce14eb95b7f041a0dda261
| 31,184 |
def videoSize():
"""
videoSize() -> (width, height)
Get the camera resolution.
    Returns: A tuple with two integer values, i.e. width and height.
    Parameters: This method does not have any parameter.
    Usage: width, height = SIGBTools.videoSize()
size = SIGBTools.videoSize()
"""
return CaptureManager.Instance.Size
|
58757851dbbde75a1e1fef201d3ed2decee8cd06
| 31,185 |
import hashlib
import numpy as np
import pandas as pd
def generate_md5_token_from_dict(input_params):
""" Generate distinct md5 token from a dictionary.
    The motivation is to encode a function's input parameters into a unique md5 token, so that changes to them can be detected.
Parameters
----------
input_params : dict
Dictionary to be encoded.
Returns
-------
str
Encoded md5 token from input_params.
"""
input_params_token = ""
# print(">>"*88)
# print(input_params)
# print(">>"*88)
for v in list(input_params["kwargs"].values()) + list(input_params["args"]) + list(input_params["feature_path"]):
if type(v) in [pd.DataFrame, pd.Series]:
input_params_token += "pandas_" + str(v.memory_usage().sum()) + "_" + str(v.shape) + "_"
elif type(v) in [np.ndarray]:
input_params_token += "numpy_" + str(v.mean()) + "_" + str(v.shape) + "_"
elif type(v) in [list, tuple, set]:
input_params_token += "list_" + str(v) + "_"
elif type(v) == str:
input_params_token += "str_" + v + "_"
elif type(v) in [int, float]:
input_params_token += "numeric_" + str(v) + "_"
elif type(v) == bool:
input_params_token += "bool_" + str(v) + "_"
elif type(v) == dict:
input_params_token += "dict_" + str(v) + "_"
else:
raise "Add type {}".format(type(v))
m = hashlib.md5(input_params_token.encode("gb2312")).hexdigest()
return m
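# Example (a sketch; the keys "kwargs", "args" and "feature_path" are required by this function):
#   token = generate_md5_token_from_dict(
#       {"kwargs": {"n": 5}, "args": [1.0, "abc"], "feature_path": ["data/train.csv"]})
#   len(token)   # -> 32 (hex digest)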
|
2faf66679a72c6fd4f2e02f3ddef728b3bab687e
| 31,186 |
import re
def create_range(range_, arrayTest, headers_suppresed = True):
"""
    Expands a spreadsheet-style range such as 'A1:B4' and returns the corresponding
    values from arrayTest as a bracketed, comma-separated string, e.g. '[v1,v2,...]'.
"""
result_string = []
first_cell = range_.split(':')[0]
last_cell = range_.split(':')[1]
column_start = re.search(r'[a-zA-Z]+', first_cell).group()
column_end = re.search(r'[a-zA-Z]+', last_cell).group()
row_start = int(re.search(r'\d+', first_cell).group())
row_end = int(re.search(r'\d+', last_cell).group())
for i in range(col_to_num(column_start), col_to_num(column_end)+1):
for j in range(row_start, row_end+1):
if headers_suppresed:
result_string.append(str(arrayTest[j-1][i-1]))
else:
result_string.append(str(arrayTest[j-2][i-1]))
return '[' + ','.join(result_string) + ']'
|
5aff1450946ee12ab943a24491a9947e6d2fa830
| 31,187 |
def find_largest_helper(n, maximum):
"""
:param n: int, the number to find the largest digit
:param maximum: int, the largest digit
:return: int, the largest digit
"""
if n % 10 > maximum:
maximum = n % 10
    if n // 10 == 0:
return maximum
else:
return find_largest_helper(n // 10, maximum)
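# Example:
#   find_largest_helper(49218, 0)   # -> 9 (the largest digit of 49218)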
|
ddd49839be6a3ab6ece7cabde41f3978df1ba6f3
| 31,188 |
def global_align(seq1_1hot, seq2_1hot):
"""Align two 1-hot encoded sequences."""
    align_opts = {'gap_open_penalty': 10, 'gap_extend_penalty': 1, 'match_score': 5, 'mismatch_score': -4}
    seq1_dna = DNA(dna_io.hot1_dna(seq1_1hot))
    seq2_dna = DNA(dna_io.hot1_dna(seq2_1hot))
    seq_align = global_pairwise_align_nucleotide(seq1_dna, seq2_dna, **align_opts)[0]
seq1_align = str(seq_align[0])
seq2_align = str(seq_align[1])
return seq1_align, seq2_align
|
35cc261f43c116a96fbec9e409875b8131ff76a3
| 31,189 |
import math
def get_points_dist(pt1, pt2):
"""
Returns the distance between a pair of points.
get_points_dist(Point, Point) -> number
Parameters
----------
pt1 : a point
pt2 : the other point
Examples
--------
>>> get_points_dist(Point((4, 4)), Point((4, 8)))
4.0
>>> get_points_dist(Point((0, 0)), Point((0, 0)))
0.0
"""
return math.hypot(pt1[0] - pt2[0], pt1[1] - pt2[1])
|
c517833dec161585c336289149a8c56461fe7642
| 31,190 |
def get_source_type(*, db_session: Session = Depends(get_db), source_type_id: PrimaryKey):
"""Given its unique ID, retrieve details about a single source type."""
source_type = get(db_session=db_session, source_type_id=source_type_id)
if not source_type:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=[{"msg": "The requested source type does not exist."}],
)
return source_type
|
31eeda78aa3dff35618cc2e57092449e47645702
| 31,191 |
def load_data(messages_filepath, categories_filepath):
""" Get the messages and categories from CSV files. """
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
return messages.merge(categories, on='id', how='left')
|
7efafc6a51b1e3dd02e3df8926fbf4ea92f8cb39
| 31,193 |
def apply_meth(dna, meth):
"""Transforms DNA sequence to methylated DNA sequence
Input:
dna (np.array): array of DNA sequence
meth (np.array): Methylation labels (len == len(dna))
Output:
dna_meth (np.array): array of methylated DNA sequence
"""
dna_meth = dna.copy()
img = True if dna.shape[1] == 4 else False
assert is_complementary(dna, img=img), f'DNA needs to be unpacked first'
mask = meth.sum(axis=1) > 0
if img:
assert dna.shape == meth.shape, f'{dna.shape} != {meth.shape}'
        dna_meth[mask] = 0
        # Concatenate the masked one-hot DNA with the methylation channels.
        dna_meth = np.concatenate((dna_meth, meth), axis=1)
else:
dna_meth[mask] = dna[mask]+4
return dna_meth
|
e835047651970dd3add0a463a235209b772411fd
| 31,194 |
def apply_affine_3D(coords_3d, affine_matrix):
"""
Apply an affine transformation to all coordinates.
Parameters
----------
coords_3d: numpy 2D array
The source coordinates, given as a 2D numpy array with shape (n, 3). Each of the n rows represents a point in space, given by its x, y and z coordinates.
affine_matrix: numpy 2D float array with shape (4, 4)
The affine matrix
Returns
-------
The coordinates after applying the matrix, 2D numpy array with shape (n, 3). Same shape as the input coords.
"""
rotation = affine_matrix[:3, :3]
translation = affine_matrix[:3, 3]
res = np.zeros((coords_3d.shape[0], 3))
for idx, row in enumerate(coords_3d):
res[idx,:] = rotation.dot(row) + translation
return res
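# Example (a sketch; assumes numpy is imported as np):
#   M = np.array([[1., 0., 0., 2.],
#                 [0., 1., 0., 0.],
#                 [0., 0., 1., -1.],
#                 [0., 0., 0., 1.]])          # pure translation by (2, 0, -1)
#   apply_affine_3D(np.array([[0., 0., 0.], [1., 1., 1.]]), M)
#   # -> array([[ 2.,  0., -1.],
#   #           [ 3.,  1.,  0.]])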
|
2b06901c4428075800e919579ab07c7577c6d9a0
| 31,195 |
def weighted_degree_kernel_pos_inv(x1, x2, K=4, var=8, beacon=None, bin=None):
"""
Weighted degree kernel with positional invariance
:param x1:
Sequence of characters.
:param x2:
Sequence of characters.
:param K:
K-mers to be scanned.
:param beacon:
Beacon sequence (tuple of characters).
If set, K is equal to beacon length and only beacons are counted.
:param bin
tuple (bin, number of all bins)
Run kernel only in specified bin.
Make sure sequences are of equal length!
:return:
Gram matrix.
"""
G = 0
if bin:
assert len(x1) == len(x2)
b, b_all = bin
if not isinstance(beacon, type(None)):
K = len(beacon)
if isinstance(beacon, str):
beacon = tuple(beacon)
for Kt in range(2, K + 1):
g = 0
        kmers_i = list(zip(*[x1[k:] for k in range(Kt)]))
        kmers_j = list(zip(*[x2[k:] for k in range(Kt)]))
if bin:
start = int(float(b)/b_all * len(kmers_i))
end = int(float(b+1)/b_all * len(kmers_j))
kmers_i = kmers_i[start:end]
kmers_j = kmers_j[start:end]
bin_norm = float(len(kmers_i)) if bin else 1
for s in range(var):
delta = 1.0 / (2*(s+1))
if isinstance(beacon, type(None)):
mu_i = np.sum([ki == kj for ki, kj in zip(kmers_i, kmers_j[s:])])
mu_j = np.sum([ki == kj for ki, kj in zip(kmers_j, kmers_i[s:])])
g += delta * (mu_i + mu_j)
else:
if Kt != len(beacon):
continue
else:
mu_i = np.sum([beacon == ki == kj for ki, kj in zip(kmers_i, kmers_j[s:])])
mu_j = np.sum([beacon == ki == kj for ki, kj in zip(kmers_j, kmers_i[s:])])
g += delta * (mu_i + mu_j)
beta = 2.0 * (K - Kt + 1) / (Kt * (Kt + 1.0)) / bin_norm
G += beta * g
return G
|
01403c7ca780c272b7b6ea37d291d79f2515edb6
| 31,196 |
import ast
def is_valid_block_variable_definition(node: _VarDefinition) -> bool:
"""Is used to check either block variables are correctly defined."""
if isinstance(node, ast.Tuple):
return all(
_is_valid_single(var_definition)
for var_definition in node.elts
)
return _is_valid_single(node)
|
9be2e583353bed1910b2dfe70bba2dfd9fe96328
| 31,197 |
def julian_day_to_gregorian(julian_day):
"""Converts a Julian Day number to its (proleptic) Gregorian calendar equivalent
Adapted from: https://en.wikipedia.org/wiki/Julian_day#Julian_or_Gregorian_calendar_from_Julian_day_number
Note that the algorithm is only valid for Julian Day numbers greater than or
equal to zero. Negative arguments for julian_day will raise a ValueError.
Args:
julian_day (int): Julian Day number to convert, must be greater than or
equal to 0
Returns:
A (day, month, year) tuple representing the day, month, and year in the
Gregorian calendar.
"""
day, month, year = _convert_julian_day(julian_day, mode="gregorian")
return GregorianDate(day, month, year)
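# Example (a sketch; the field order of GregorianDate is assumed to be (day, month, year) as documented):
#   julian_day_to_gregorian(2451545)   # -> GregorianDate(day=1, month=1, year=2000)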
|
1544f6cd5abee1f5cef3b5befbf24f82c8d26e44
| 31,198 |
def isnotebook():
"""Identify shell environment."""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
if shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
# None of the above: other type ?
return False
except NameError:
return False # Probably standard Python interpreter
|
4620d8275c10f5aeb8e13406c897b5b93313de97
| 31,199 |
def erratic_leveling(target_level: int) -> int:
"""
Non-trivial calculation of experience to next level for an erratic leveling curve.
Args:
target_level (int): the level to reach.
Returns:
The amount of experience to reach this level from the ground up (from experience 0),
        according to an erratic leveling curve.
"""
if target_level <= 50:
return (target_level ** 3) * (100 - target_level) / 50
elif 51 <= target_level <= 68:
return (target_level ** 3) * (150 - target_level) / 100
elif 69 <= target_level <= 98:
if target_level % 3 == 0:
return round((target_level ** 3) * (1.274 - target_level / 150), 3)
elif target_level % 3 == 1:
return round((target_level ** 3) * (1.274 - target_level / 150 - 0.008), 3)
else:
return round((target_level ** 3) * (1.274 - target_level / 150 - 0.014), 3)
elif 99 <= target_level <= 100:
return (target_level ** 3) * (160 - target_level) / 100
else:
logger.error(
f"An invalid target level was provided: {target_level} which is higher than "
f"the highest reachable level (100)."
)
raise ValueError("Invalid target level: too high.")
|
0841aed503226932ebd49a66cdd42665eee265b2
| 31,200 |
import re
def get_puppetfile_tags(puppetfile):
"""
    Obtain VCS sources and their pinned refs/tags from a Puppetfile.
    :return: tuple(list, list) of (vcs_urls, tags); None if their counts differ
"""
regex_vcs = re.compile(r"^:(git|svn)\s+=>\s+['\"](.+)['\"]\,", re.I)
regex_tag = re.compile(r"^:(ref|tag|commit|branch)\s+=>\s+['\"](.+)['\"]\,?", re.I)
vcss = []
tags = []
with open(puppetfile) as f:
for line in f:
match_vcs = regex_vcs.match(line.strip())
if match_vcs:
vcss.append(match_vcs.group(2))
match_tag = regex_tag.match(line.strip())
if match_tag:
tags.append(match_tag.group(2))
if len(vcss) == len(tags):
return vcss, tags
|
6beec37d4c8a3a3b9a2c845cea0f5e12e18af620
| 31,201 |
def inf_is_wide_high_byte_first(*args):
"""
inf_is_wide_high_byte_first() -> bool
"""
return _ida_ida.inf_is_wide_high_byte_first(*args)
|
13f30f49823e792ec83fb4b50266c72ceeed942c
| 31,202 |
def rewrite_and_sanitize_link(link_header):
"""Sanitize and then rewrite a link header."""
return rewrite_links(sanitize_link(link_header))
|
907cc1492be7162611408200ad660e1a49dd5e14
| 31,203 |
def user_info():
"""
    Render the user profile (personal center) page.
:return:
"""
user = g.user
if not user:
return redirect("/")
data = {
"user": user.to_dict()
}
return render_template("news/user.html", data=data)
|
2cbe80c6086bffbb5e147ee756b7b393b546da99
| 31,204 |
def cartesian2polar(state: CartesianState, state_goal : CartesianState) -> PolarState:
"""
rho is the distance between the robot and the goal position
: \sqrt((x*-x)^2 + (y*-y)^2)
alpha is the heading of the robot relative the angle to the goal
: theta - atan2((y*-y),(x*-x))
beta is the goal position relative to the angle to the goal
: theta* - atan2((y*-y),(x*-x))
>>> state = np.random.rand(3)* np.array([2, 2, 2*np.pi]) - np.array([1, 1, np.pi])
>>> state_goal = np.random.rand(3)* np.array([2, 2, 2*np.pi]) - np.array([1, 1, np.pi])
>>> polar = cartesian2polar(state, state_goal)
>>> statep = polar2cartesian(polar, state_goal)
>>> np.testing.assert_allclose(state, statep)
"""
x, y, theta = state
x_goal, y_goal, theta_goal = state_goal
x_diff = x_goal - x
y_diff = y_goal - y
# reparameterization
rho = np.hypot(x_diff, y_diff)
phi = np.arctan2(y_diff, x_diff)
alpha = angdiff(theta, phi)
beta = angdiff(theta_goal , phi)
return np.array((rho, alpha, beta))
|
92eea79a8ac8f7c83e78d9aaaff3d6af500b9876
| 31,205 |
def organize_array_by_rows(unformatted_array, num_cols):
"""Take unformatted array and make grid array"""
num_rows = int(len(unformatted_array) / num_cols)
array = []
for row in range(num_rows):
array.append(unformatted_array[row * num_cols:(row + 1) * num_cols])
return array
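# Example:
#   organize_array_by_rows([1, 2, 3, 4, 5, 6], 3)   # -> [[1, 2, 3], [4, 5, 6]]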
|
8a7d74ea593bfcc5c4d3a92d1c192b2bf628f641
| 31,206 |
from typing import Union
from typing import Literal
from typing import Sequence
def group_abundance(
adata: AnnData,
groupby: str,
target_col: str = "has_ir",
*,
fraction: Union[None, str, bool] = None,
sort: Union[Literal["count", "alphabetical"], Sequence[str]] = "count",
) -> pd.DataFrame:
"""Summarizes the number/fraction of cells of a certain category by a certain group.
Ignores NaN values.
Parameters
----------
adata
AnnData object to work on.
groupby
Group by this column from `obs`. E.g, sample, or group.
target_col
        Categorical variable from `obs` according to which the abundance/fractions
will be computed. This defaults to "has_ir", simply counting
the number of cells with a detected :term:`IR` by group.
fraction
If `True`, compute fractions of abundances relative to the `groupby` column
        rather than reporting absolute numbers. Alternatively, a column
        name can be provided according to which the values will be normalized.
sort
How to arrange the dataframe columns.
Default is by the category count ("count").
Other options are "alphabetical" or to provide a list of column names.
By providing an explicit list, the DataFrame can also be subsetted to
specific categories.
Returns
-------
Returns a data frame with the number (or fraction) of cells per group.
"""
if target_col not in adata.obs.columns:
raise ValueError("`target_col` not found in obs`")
ir_obs = adata.obs
return _group_abundance(
ir_obs, groupby, target_col=target_col, fraction=fraction, sort=sort
)
|
adfc5047349ec5fcffdc05de0ae2ecdfbf9b8b6c
| 31,207 |
def infer(model, text_sequences, input_lengths):
"""
An inference hook for pretrained synthesizers
Arguments
---------
model: Tacotron2
the tacotron model
text_sequences: torch.Tensor
encoded text sequences
input_lengths: torch.Tensor
input lengths
Returns
-------
result: tuple
(mel_outputs_postnet, mel_lengths, alignments) - the exact
model output
"""
return model.infer(text_sequences, input_lengths)
|
e7937395956e2dcd35dd86bc23599fbb63417c22
| 31,208 |
def build_info(image, spack_version):
"""Returns the name of the build image and its tag.
Args:
image (str): image to be used at run-time. Should be of the form
<image_name>:<image_tag> e.g. "ubuntu:18.04"
spack_version (str): version of Spack that we want to use to build
Returns:
A tuple with (image_name, image_tag) for the build image
"""
# Don't handle error here, as a wrong image should have been
# caught by the JSON schema
image_data = data()["images"][image]
build_image = image_data.get('build', None)
if not build_image:
return None, None
# Translate version from git to docker if necessary
build_tag = image_data['build_tags'].get(spack_version, spack_version)
return build_image, build_tag
|
bb09a530e2fdf50b78225647df1238ae08fe5b3d
| 31,209 |
def _get_data_attr(data, attr):
"""Get data object field."""
if isinstance(data, dict):
# `Data` object's id is hydrated as `__id` in expression engine
data = data["__id"]
data_obj = Data.objects.get(id=data)
return getattr(data_obj, attr)
|
bdc90d01172655f77680f0c373ed609b4100e874
| 31,210 |
def get_user_project(user, dds_project_id):
"""
Get a single Duke DS Project for a user
:param user: User who has DukeDS credentials
:param dds_project_id: str: duke data service project id
:return: DDSProject: project details
"""
try:
remote_store = get_remote_store(user)
project = remote_store.data_service.get_project_by_id(dds_project_id).json()
return DDSProject(project)
except DataServiceError as dse:
raise WrappedDataServiceException(dse)
|
71649da2b092954d8f7d65059edd75fc18a8e750
| 31,211 |
def _event_split(elist):
"""Split event list into dictionary of event keywords
"""
eventdict = dict()
dictkeys = (roxar.EventType.WLIMRATE,
roxar.EventType.WLIMPRES,
roxar.EventType.WLIMRATIO,
roxar.EventType.WHISTRATE,
roxar.EventType.WHISTPRES,
roxar.EventType.WCONTROL,
roxar.EventType.WTYPE,
roxar.EventType.WSEGMOD,
roxar.EventType.WSEGSEG,
roxar.EventType.GCONTROL,
roxar.EventType.GMEMBER,
roxar.EventType.GLIMRATE,
roxar.EventType.GLIMPRES,
roxar.EventType.GLIMRATIO,
roxar.EventType.PERF,
roxar.EventType.CPERF,
roxar.EventType.SQUEEZE,
roxar.EventType.TUBING)
for d in dictkeys:
eventdict[d] = []
for ev in elist:
if ev.type in dictkeys:
eventdict[ev.type].append(ev)
return eventdict
|
8097fdee8b36881b0c5a4851165ac57f70482415
| 31,212 |
import random
import re
def generate_reply(utt, dais):
"""Generate a reply task for the given utterance and DAIs list."""
ret = DataLine(dat='reply', abstr_utt=utt, abstr_da='&'.join([unicode(dai) for dai in dais]))
utt, dais = deabstract(utt, dais)
# offer a ride (meeting the specifications in dais)
if all([dai.dat in ['inform', 'confirm'] for dai in dais]):
info = {dai.name: dai.value for dai in dais}
if 'vehicle' not in info:
info['vehicle'] = random.choice(['subway', 'bus'])
if info['vehicle'] == 'subway':
info['line'] = random.choice('1234567ABCDEFGJLMNQRZ')
else:
info['line'] = 'M' + str(random.choice(BUS_LINES))
if 'ampm' not in info:
if 'time' in info:
time_val, _ = info['time'].split(':')
time_val = int(time_val)
if time_val < 7 or time_val == 12:
info['ampm'] = 'pm'
if 'ampm' not in info:
info['ampm'] = random.choice(['am', 'pm'])
if 'departure_time' not in info:
if 'time' in info:
info['departure_time'] = info['time']
del info['time']
elif info['ampm'] == 'am':
info['departure_time'] = str(random.choice(range(7, 12))) + ':00'
else:
info['departure_time'] = str(random.choice(range(1, 13))) + ':00'
if 'from_stop' not in info:
info['from_stop'] = random.choice(STOPS)
if 'to_stop' not in info:
remaining_stops = list(STOPS)
remaining_stops.remove(info['from_stop'])
info['to_stop'] = random.choice(remaining_stops)
info['direction'] = info['to_stop']
del info['to_stop']
info['departure_time'] = re.sub(r'00$', '%02d' % random.choice(range(20)),
info['departure_time'])
info['departure_time'] += info['ampm']
del info['ampm']
for slot_name in ['departure_time_rel', 'time_rel',
'alternative', 'arrival_time', 'arrival_time_rel']:
if slot_name in info:
del info[slot_name]
dais_str = [slot + '=' + value for slot, value in info.iteritems()]
random.shuffle(dais_str)
dais_str = ', '.join(dais_str)
# offer additional information
else:
dais_str = ''
if any([dai.name == 'distance' and dai.dat == 'request' for dai in dais]):
dais_str += ', distance=%3.1f miles' % (random.random() * 12)
if any([dai.name == 'num_transfers' and dai.dat == 'request' for dai in dais]):
dais_str += ', num_transfers=%d' % random.choice(range(0, 3))
if any([dai.name == 'duration' and dai.dat == 'request' for dai in dais]):
dais_str += ', duration=%d minutes' % random.choice(range(10, 80))
if any([dai.name == 'departure_time' and dai.dat == 'request' for dai in dais]):
hr, ampm = random_hour()
min = random.choice(range(60))
dais_str += ', departure_time=%d:%02d%s' % (hr, min, ampm)
if any([dai.name == 'arrival_time' and dai.dat == 'request' for dai in dais]): # arrival_time_rel does not occur
hr, ampm = random_hour()
min = random.choice(range(60))
dais_str += ', arrival_time=%d:%02d%s' % (hr, min, ampm)
if dais_str == '':
raise NotImplementedError('Cannot generate a reply for: ' + unicode(dais))
dais_str = dais_str[2:]
ret.utt = utt
ret.da = dais_str
return ret
|
9a9d1a7271b03e01e492be830e29725399f61387
| 31,214 |
def create_concept_graphs(example_indices, grakn_session):
"""
Builds an in-memory graph for each example, with an example_id as an anchor for each example subgraph.
Args:
example_indices: The values used to anchor the subgraph queries within the entire knowledge graph
grakn_session: Grakn Session
Returns:
In-memory graphs of Grakn subgraphs
"""
graphs = []
infer = True
for example_id in example_indices:
print(f'Creating graph for example {example_id}')
graph_query_handles = get_query_handles(example_id)
with grakn_session.transaction().read() as tx:
# Build a graph from the queries, samplers, and query graphs
graph = build_graph_from_queries(graph_query_handles, tx, infer=infer)
obfuscate_labels(graph, TYPES_AND_ROLES_TO_OBFUSCATE)
graph.name = example_id
graphs.append(graph)
return graphs
|
66d5d13fad865e6d6437eb29d20e611e509ad7f7
| 31,215 |
def GetChange(host, change):
"""Queries a Gerrit server for information about a single change."""
path = 'changes/%s' % change
return _SendGerritJsonRequest(host, path)
|
3f4c7c3554fdbba0cc6bc0c8c513823859d22d61
| 31,216 |
def block_shape(f):
"""
find the block shape (nxb, nyb, nzb) given the hdf5 file f
returns
dimension, (nxb, nyb, nzb)
"""
if 'integer scalars' in f.root:
params = f.getNode(f.root, 'integer scalars').read()
p_dict = dict((name.rstrip(), val) for name, val in params)
dimension = p_dict['dimensionality']
nb = empty(dimension, dtype=int32)
for i,par in enumerate(['nxb', 'nyb', 'nzb'][:dimension]):
nb[i]= p_dict[par]
else:
        print(dir(f.getNode(f.root, 'block size')))
dimension = 3
params = f.getNode(f.root, 'simulation parameters')
nb = empty(dimension, dtype=int32)
for i in range(dimension):
nb[i] = params[0][5+i]
return dimension, nb
|
ce7e3f58400185fa76855dc809f78867905915bc
| 31,218 |
def model_init(rng_key, batch, encoder_sizes=(1000, 500, 250, 30)):
"""Initialize the standard autoencoder."""
x_size = batch.shape[-1]
decoder_sizes = encoder_sizes[len(encoder_sizes) - 2::-1]
sizes = (x_size,) + encoder_sizes + decoder_sizes + (x_size,)
keys = jax.random.split(rng_key, len(sizes) - 1)
params = []
for rng_key, dim_in, dim_out in zip(keys, sizes, sizes[1:]):
# Glorot uniform initialization
w = glorot_uniform((dim_in, dim_out), rng_key)
b = jnp.zeros([dim_out])
params.append((w, b))
return params, None
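# Example (a sketch; assumes jax, jax.numpy as jnp and the glorot_uniform helper are available):
#   params, _ = model_init(jax.random.PRNGKey(0), jnp.ones((32, 784)))
#   len(params)   # -> 8 layers: sizes 784-1000-500-250-30-250-500-1000-784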
|
937aa19a7bac1fd1e90e6ef7d7027dcb3822dcc8
| 31,219 |
def _make_rotation_matrix(vector_1,vector_2):
"""" Generates the rotation matrix from vector_1 to vector_2"""
# Use formula for rotation matrix: R = I + A + A^2 * b
# https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d
v = np.cross(vector_1,vector_2)
c = np.dot(vector_1,vector_2)
s = np.linalg.norm(v)
b = (1-c)/s**2
# Generate rotation matrix
A = np.zeros((3,3))
A[0][1] += -v[2]
A[0][2] += v[1]
A[1][2] += -v[0]
A[1][0] += v[2]
A[2][0] += -v[1]
A[2][1] += v[0]
B = np.dot(A,A)
I = np.identity(3)
R = I + A + b * B
    return R
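# Example (a sketch; assumes numpy is imported as np and both inputs are unit, non-parallel vectors):
#   R = _make_rotation_matrix(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#   R.dot([1., 0., 0.])   # -> approximately [0., 1., 0.]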
|
17c24c4c4e6c8378b65076686f4d80736d6ccf3e
| 31,221 |
def get_models(models='all'):
"""
Returns model names as a list
Parameters
----------
models: str
        OPTIONAL. Default value is 'all' in which case all keys in default_models are returned.
If 'mixed' is passed, only the MixedFluid model names are returned.
"""
if models == 'all':
return list(default_models.keys())
if models == 'mixed':
return ['Shishkina', 'Dixon', 'IaconoMarziano', 'Liu']
|
dcf0a00946f3146e5511825d875947bb5278be6a
| 31,222 |
def other_language_code():
"""Language code used for testing, currently not set by user."""
return 'de-DE'
|
2cbac23cd7a13e71991be6516a3a38dee19ae690
| 31,223 |
import numpy
def do_novelty_detection(
baseline_image_matrix, test_image_matrix, image_normalization_dict,
predictor_names, cnn_model_object, cnn_feature_layer_name,
ucn_model_object, num_novel_test_images,
percent_svd_variance_to_keep=97.5):
"""Does novelty detection.
Specifically, this method follows the procedure in Wagstaff et al. (2018)
to determine which images in the test set are most novel with respect to the
baseline set.
NOTE: Both input and output images are (assumed to be) denormalized.
B = number of baseline examples (storm objects)
T = number of test examples (storm objects)
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param baseline_image_matrix: B-by-M-by-N-by-C numpy array of baseline
images.
:param test_image_matrix: T-by-M-by-N-by-C numpy array of test images.
:param image_normalization_dict: See doc for `normalize_images`.
:param predictor_names: length-C list of predictor names.
:param cnn_model_object: Trained CNN model (instance of
`keras.models.Model`). Will be used to turn images into scalar
features.
:param cnn_feature_layer_name: The "scalar features" will be the set of
activations from this layer.
:param ucn_model_object: Trained UCN model (instance of
`keras.models.Model`). Will be used to turn scalar features into
images.
:param num_novel_test_images: Number of novel test images to find.
:param percent_svd_variance_to_keep: See doc for `_fit_svd`.
:return: novelty_dict: Dictionary with the following keys. In the following
discussion, Q = number of novel test images found.
novelty_dict['novel_image_matrix_actual']: Q-by-M-by-N-by-C numpy array of
novel test images.
novelty_dict['novel_image_matrix_upconv']: Same as
"novel_image_matrix_actual" but reconstructed by the upconvnet.
novelty_dict['novel_image_matrix_upconv_svd']: Same as
"novel_image_matrix_actual" but reconstructed by SVD (singular-value
decomposition) and the upconvnet.
:raises: TypeError: if `image_normalization_dict is None`.
"""
if image_normalization_dict is None:
error_string = (
'image_normalization_dict cannot be None. Must be specified.')
raise TypeError(error_string)
num_test_examples = test_image_matrix.shape[0]
baseline_image_matrix_norm, _ = normalize_images(
predictor_matrix=baseline_image_matrix + 0.,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
test_image_matrix_norm, _ = normalize_images(
predictor_matrix=test_image_matrix + 0.,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
baseline_feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object,
predictor_matrix=baseline_image_matrix_norm, verbose=False,
output_layer_name=cnn_feature_layer_name)
test_feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object,
predictor_matrix=test_image_matrix_norm, verbose=False,
output_layer_name=cnn_feature_layer_name)
novel_indices = []
novel_image_matrix_upconv = None
novel_image_matrix_upconv_svd = None
for k in range(num_novel_test_images):
print('Finding {0:d}th of {1:d} novel test images...'.format(
k + 1, num_novel_test_images))
if len(novel_indices) == 0:
this_baseline_feature_matrix = baseline_feature_matrix + 0.
this_test_feature_matrix = test_feature_matrix + 0.
else:
novel_indices_numpy = numpy.array(novel_indices, dtype=int)
this_baseline_feature_matrix = numpy.concatenate(
(baseline_feature_matrix,
test_feature_matrix[novel_indices_numpy, ...]),
axis=0)
this_test_feature_matrix = numpy.delete(
test_feature_matrix, obj=novel_indices_numpy, axis=0)
svd_dictionary = _fit_svd(
baseline_feature_matrix=this_baseline_feature_matrix,
test_feature_matrix=this_test_feature_matrix,
percent_variance_to_keep=percent_svd_variance_to_keep)
svd_errors = numpy.full(num_test_examples, numpy.nan)
test_feature_matrix_svd = numpy.full(
test_feature_matrix.shape, numpy.nan)
for i in range(num_test_examples):
if i in novel_indices:
continue
test_feature_matrix_svd[i, ...] = _apply_svd(
feature_vector=test_feature_matrix[i, ...],
svd_dictionary=svd_dictionary)
svd_errors[i] = numpy.linalg.norm(
test_feature_matrix_svd[i, ...] - test_feature_matrix[i, ...]
)
new_novel_index = numpy.nanargmax(svd_errors)
novel_indices.append(new_novel_index)
new_image_matrix_upconv = ucn_model_object.predict(
test_feature_matrix[[new_novel_index], ...], batch_size=1)
new_image_matrix_upconv_svd = ucn_model_object.predict(
test_feature_matrix_svd[[new_novel_index], ...], batch_size=1)
if novel_image_matrix_upconv is None:
novel_image_matrix_upconv = new_image_matrix_upconv + 0.
novel_image_matrix_upconv_svd = new_image_matrix_upconv_svd + 0.
else:
novel_image_matrix_upconv = numpy.concatenate(
(novel_image_matrix_upconv, new_image_matrix_upconv), axis=0)
novel_image_matrix_upconv_svd = numpy.concatenate(
(novel_image_matrix_upconv_svd, new_image_matrix_upconv_svd),
axis=0)
novel_indices = numpy.array(novel_indices, dtype=int)
novel_image_matrix_upconv = denormalize_images(
predictor_matrix=novel_image_matrix_upconv,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
novel_image_matrix_upconv_svd = denormalize_images(
predictor_matrix=novel_image_matrix_upconv_svd,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
return {
NOVEL_IMAGES_ACTUAL_KEY: test_image_matrix[novel_indices, ...],
NOVEL_IMAGES_UPCONV_KEY: novel_image_matrix_upconv,
NOVEL_IMAGES_UPCONV_SVD_KEY: novel_image_matrix_upconv_svd
}
|
69181690b81a987b45dcdbea5b0febe8928b365b
| 31,224 |