content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def dcm_to_unrescaled(dcm_dict, save_path=None, show=True, return_resolution=False):
"""
    Stack DICOM (.dcm) files into a single array without applying rescaling.
    :param return_resolution: if True, also return the voxel resolution
    :param show: if True, print progress information
    :param dcm_dict: the DICOM files to stack
    :param save_path: the save path for the stacked array
:return: the stacked array in float32
"""
array_stacked, resolution = read_in_CT.stack_dcm_files_simplest(dcm_dict, show=show)
if save_path is not None:
if show:
print("save array to:", save_path)
Functions.save_np_to_path(save_path, array_stacked)
if return_resolution:
return array_stacked, resolution
return array_stacked
|
b4954c4f89100093b501d6e662a8b03eb247039b
| 29,469 |
def change_unit_of_metrics(metrics):
"""Change order of metrics from bpd to nats for binarized mnist only"""
if hparams.data.dataset_source == 'binarized_mnist':
# Convert from bpd to nats for comparison
metrics['kl_div'] = metrics['kl_div'] * jnp.log(2.) * get_effective_n_pixels()
metrics['avg_kl_divs'] = jax.tree_map(lambda x: x * jnp.log(2.) * get_effective_n_pixels(), metrics['avg_kl_divs'])
metrics['avg_recon_loss'] = metrics['avg_recon_loss'] * jnp.log(2.) * get_effective_n_pixels()
return metrics
|
0435a4caf8c82587f84fb05ae493e43654bdf22e
| 29,471 |
def step(ram: dict, regs: dict, inputs: list[int]) -> tuple:
"""Advance robot by a single step
:param ram: memory contents
:param regs: register map
:param inputs: input queue
:return: updated pc; new color and turn direction
"""
pc = regs['pc']
relative_base = regs['rb']
output_values = list()
while True:
instruction = ram[pc]
opcode, operand_modes = decode(instruction=instruction)
halt = ISA[opcode].name == 'Halt'
if halt:
raise HaltOpcode
load_modes = operand_modes[:ISA[opcode].load_args]
operands = fetch(instruction_pointer=pc,
load_modes=load_modes, ram=ram,
relative_base=relative_base,
opcode=opcode, input_stack=inputs)
output = execute(opcode=opcode, operands=operands)
store_mode = operand_modes[-ISA[opcode].store_args:][0]
store(opcode=opcode, store_mode=store_mode, output=output,
instruction_pointer=pc, ram=ram,
relative_base=relative_base)
output_values.extend(push_output(opcode=opcode, output=output))
relative_base += shift_base(opcode=opcode, output=output)
next_instruction_pointer = jump_next_instruction(
opcode=opcode, instruction_pointer=pc, operands=operands)
pc = next_instruction_pointer
if len(output_values) == 2:
break
regs['pc'] = pc
regs['rb'] = relative_base
return tuple(output_values)
|
4df0395e88a5ccd9f34edd39ea0841d16df6838a
| 29,473 |
def replace_start(text,
pattern,
repl,
ignore_case=False,
escape=True):
"""Like :func:`replace` except it only replaces `text` with `repl` if
    `pattern` matches the start of `text`.
Args:
text (str): String to replace.
pattern (str): String pattern to find and replace.
repl (str): String to substitute `pattern` with.
        ignore_case (bool, optional): Whether to ignore case when replacing.
Defaults to ``False``.
escape (bool, optional): Whether to escape `pattern` when searching.
This is needed if a literal replacement is desired when `pattern`
may contain special regular expression characters. Defaults to
``True``.
Returns:
str: Replaced string.
Example:
>>> replace_start('aabbcc', 'b', 'X')
'aabbcc'
>>> replace_start('aabbcc', 'a', 'X')
'Xabbcc'
.. versionadded:: 4.1.0
"""
return replace(text,
pattern,
repl,
ignore_case=ignore_case,
escape=escape,
from_start=True)
|
2296503a1c97cc06fa1fcc3768f54595fbc09940
| 29,474 |
from copy import copy
def copy_excel_cell_range(
src_ws: openpyxl.worksheet.worksheet.Worksheet,
min_row: int = None,
max_row: int = None,
min_col: int = None,
max_col: int = None,
tgt_ws: openpyxl.worksheet.worksheet.Worksheet = None,
tgt_min_row: int = 1,
tgt_min_col: int = 1,
with_style: bool = True
) -> openpyxl.worksheet.worksheet.Worksheet:
"""
copies all cells from the source worksheet [src_ws] starting from [min_row] row
and [min_col] column up to [max_row] row and [max_col] column
to target worksheet [tgt_ws] starting from [tgt_min_row] row
and [tgt_min_col] column.
@param src_ws: source worksheet
@param min_row: smallest row index in the source worksheet (1-based index)
@param max_row: largest row index in the source worksheet (1-based index)
@param min_col: smallest column index in the source worksheet (1-based index)
@param max_col: largest column index in the source worksheet (1-based index)
@param tgt_ws: target worksheet.
If None, then the copy will be done to the same (source) worksheet.
@param tgt_min_row: target row index (1-based index)
@param tgt_min_col: target column index (1-based index)
@param with_style: whether to copy cell style. Default: True
@return: target worksheet object
"""
if tgt_ws is None:
tgt_ws = src_ws
# https://stackoverflow.com/a/34838233/5741205
for row in src_ws.iter_rows(min_row=min_row, max_row=max_row,
min_col=min_col, max_col=max_col):
for cell in row:
tgt_cell = tgt_ws.cell(
row=cell.row + tgt_min_row - 1,
column=cell.col_idx + tgt_min_col - 1,
value=cell.value
)
if with_style and cell.has_style:
# tgt_cell._style = copy(cell._style)
tgt_cell.font = copy(cell.font)
tgt_cell.border = copy(cell.border)
tgt_cell.fill = copy(cell.fill)
tgt_cell.number_format = copy(cell.number_format)
tgt_cell.protection = copy(cell.protection)
tgt_cell.alignment = copy(cell.alignment)
return tgt_ws
|
b98d2dda9fa0915dcb7bc3f4b1ff1049340afc68
| 29,476 |
def index(request):
"""Home page"""
return render(request, 'index.html')
|
66494cd74d1b0969465c6f90c2456b4283e7e2d3
| 29,477 |
import ast
def get_teams_selected(request, lottery_info):
""" get_teams_selected updates the teams
selected by the user
@param request (flask.request object): Object containing
args attributes
@param lottery_info (dict): Dictionary keyed by
reverse standings order, with dictionary
values containing 'name' and 'id' keys
for the team
Returns:
- teams_selected (list): Teams previously
selected by the user
"""
teams_selected = []
selections = ast.literal_eval(request.args['teams_selected'])
for val in selections:
team_name = selections[val].split(' ')[-1]
if team_name != '':
for x in range(len(lottery_info), 0, -1):
if lottery_info[x]['name'] == team_name:
teams_selected.append(x)
return teams_selected
|
35edfab322ce5ad039f869027552c664f9e6b576
| 29,478 |
from testtools import TestCase
def assert_fails_with(d, *exc_types, **kwargs):
"""Assert that ``d`` will fail with one of ``exc_types``.
The normal way to use this is to return the result of
``assert_fails_with`` from your unit test.
Equivalent to Twisted's ``assertFailure``.
:param Deferred d: A ``Deferred`` that is expected to fail.
:param exc_types: The exception types that the Deferred is expected to
fail with.
:param type failureException: An optional keyword argument. If provided,
will raise that exception instead of
``testtools.TestCase.failureException``.
:return: A ``Deferred`` that will fail with an ``AssertionError`` if ``d``
does not fail with one of the exception types.
"""
failureException = kwargs.pop('failureException', None)
if failureException is None:
# Avoid circular imports.
failureException = TestCase.failureException
expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)
def got_success(result):
raise failureException(
"%s not raised (%r returned)" % (expected_names, result))
def got_failure(failure):
if failure.check(*exc_types):
return failure.value
raise failureException("%s raised instead of %s:\n %s" % (
failure.type.__name__, expected_names, failure.getTraceback()))
return d.addCallbacks(got_success, got_failure)
|
1ff967f66c6d8e1d7f34354459d169bdfe95987a
| 29,479 |
def temporal_affine_backward(dout, cache):
"""
Backward pass for temporal affine layer.
Input:
- dout: Upstream gradients of shape (N, T, M)
- cache: Values from forward pass
Returns a tuple of:
- dx: Gradient of input, of shape (N, T, D)
- dw: Gradient of weights, of shape (D, M)
- db: Gradient of biases, of shape (M,)
"""
x, w, b, out = cache
N, T, D = x.shape
M = b.shape[0]
dx = dout.reshape(N * T, M).dot(w.T).reshape(N, T, D)
dw = dout.reshape(N * T, M).T.dot(x.reshape(N * T, D)).T
db = dout.sum(axis=(0, 1))
return dx, dw, db
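
# Usage sketch (not part of the original snippet): a quick shape check with
# random arrays, assuming the forward pass cached (x, w, b, out) as expected.
import numpy as np
N, T, D, M = 2, 3, 4, 5
x, w, b = np.random.randn(N, T, D), np.random.randn(D, M), np.random.randn(M)
out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b
dout = np.random.randn(N, T, M)
dx, dw, db = temporal_affine_backward(dout, (x, w, b, out))
assert dx.shape == (N, T, D) and dw.shape == (D, M) and db.shape == (M,)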
|
2cf4ead02fdaa0a54f828d09166128f1b5473d0b
| 29,480 |
def _make_cmake(config_info):
"""This function initializes a CMake builder for building the project."""
configure_args = ["-DCMAKE_EXPORT_COMPILE_COMMANDS=ON"]
cmake_args = {}
options, option_fns = _make_all_options()
def _add_value(value, key):
args_key, args_value = _EX_ARG_FNS[key](value)
cmake_args[args_key] = args_value
devpipeline_core.toolsupport.args_builder(
"cmake",
config_info,
options,
lambda v, key: configure_args.extend(option_fns[key](v)),
)
devpipeline_core.toolsupport.args_builder(
"cmake", config_info, _EX_ARGS, _add_value
)
cmake = CMake(cmake_args, config_info, configure_args)
build_type = config_info.config.get("cmake.build_type")
if build_type:
cmake.set_build_type(build_type)
return devpipeline_build.make_simple_builder(cmake, config_info)
|
fdef36f0875438ed0b5544367b4cb3fb5308f43d
| 29,481 |
def box_to_delta(box, anchor):
"""((x1,y1) = upper left corner, (x2, y2) = lower right corner):
* box center point, width, height = (x, y, w, h)
* anchor center point, width, height = (x_a, y_a, w_a, h_a)
* anchor = (x1=x_a-w_a/2, y1=y_a-h_a/2, x2=x_a+w_a/2, y2=y_a+h_a/2)
    * box = (x1=x-w/2, y1=y-h/2, x2=x+w/2, y2=y+h/2)
* box_delta = ((x-x_a)/w_a, (y-y_a)/h_a, log(w/w_a), log(h/h_a))
:param tuple box: box coordinates
:param tuple anchor: anchor coordinates
:return: box delta coordinates as described above
"""
# box
x1, y1, x2, y2 = box
w, h = x2 - x1, y2 - y1
x, y = x1 + w / 2, y1 + h / 2
# anchor
x1_a, y1_a, x2_a, y2_a = anchor
w_a, h_a = x2_a - x1_a, y2_a - y1_a
x_a, y_a = x1_a + w_a / 2.0, y1_a + h_a / 2.0
dx, dy = (x - x_a) / w_a, (y - y_a) / h_a
dw, dh = log(w / w_a), log(h / h_a)
return dx, dy, dw, dh
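
# Usage sketch (not part of the original snippet). The function assumes log()
# is in scope in its original module (e.g. from math import log).
from math import log
anchor = (0.0, 0.0, 10.0, 10.0)
box = (1.0, 2.0, 9.0, 8.0)
print(box_to_delta(box, anchor))     # small center offsets, log size ratios
print(box_to_delta(anchor, anchor))  # (0.0, 0.0, 0.0, 0.0)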
|
2033c66c89a25541af77678ab368d6f30628d0f5
| 29,482 |
def mutual_information(prob1, prob2, prob_joint):
"""
Calculates mutual information between two random variables
Arguments
------------------
prob1 (numpy array):
The probability distribution of the first variable
prob1.sum() should be 1
prob2 (numpy array):
        The probability distribution of the second variable
Again, prob2.sum() should be 1
prob_joint (two dimensional numpy array):
        The joint probability; marginalizing over the
different axes should give prob1 and prob2
Returns
------------------
mutual information (scalar):
The mutual information between two variables
Examples
------------------
A mixed joint:
>>> p_joint = np.array((0.3, 0.1, 0.2, 0.4)).reshape((2, 2))
>>> p1 = p_joint.sum(axis=1)
>>> p2 = p_joint.sum(axis=0)
>>> mutual_information(p1, p2, p_joint)
0.12451124978365299
An uninformative joint:
>>> p_joint = np.array((0.25, 0.25, 0.25, 0.25)).reshape((2, 2))
>>> p1 = p_joint.sum(axis=1)
>>> p2 = p_joint.sum(axis=0)
>>> mutual_information(p1, p2, p_joint)
0.0
A very coupled joint:
>>> p_joint = np.array((0.4, 0.05, 0.05, 0.4)).reshape((2, 2))
>>> p1 = p_joint.sum(axis=1)
>>> p2 = p_joint.sum(axis=0)
>>> mutual_information(p1, p2, p_joint)
0.58387028280246378
Using the alternative definition of mutual information
>>> p_joint = np.array((0.4, 0.2, 0.1, 0.3)).reshape((2, 2))
>>> p1 = p_joint.sum(axis=1)
>>> p2 = p_joint.sum(axis=0)
>>> MI = mutual_information(p1, p2, p_joint)
>>> x1 = entropy(p1)
>>> x2 = entropy(p2)
>>> x3 = joint_entropy(p_joint)
>>> np.isclose(MI, x1 + x2 - x3)
True
"""
outer = np.outer(prob1, prob2)
return np.sum(prob_joint * np.log2(prob_joint / outer))
|
4d6d2738c84092470b83497e911e767b18878857
| 29,483 |
def hog(img, num_bins, edge_num_cells=2):
""" Histogram of oriented gradients
:param img: image to process
    :param num_bins: number of orientation bins per cell
    :param edge_num_cells: cut img into cells: 2 = 2x2, 3 = 3x3 etc.
    :return: concatenated orientation histogram over all cells
"""
if edge_num_cells != 2:
raise NotImplementedError
w, h = img.shape[:2]
    cut_x = w // 2
    cut_y = h // 2
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
ang = np.int32(num_bins * ang / (2 * np.pi))
bin_cells = (ang[:cut_x, :cut_y], ang[cut_x:, :cut_y],
ang[:cut_x, cut_y:], ang[cut_x:, cut_y:])
mag_cells = (mag[:cut_x, :cut_y], mag[cut_x:, :cut_y],
mag[:cut_x, cut_y:], mag[cut_x:, cut_y:])
hists = [np.bincount(
b.ravel(), m.ravel(), num_bins) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
return hist
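
# Usage sketch (not part of the original snippet). The function relies on cv2
# and numpy (as np) being imported at module level.
import cv2
import numpy as np
rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
hist = hog(img, num_bins=16)
print(hist.shape)  # typically (64,): four 2x2 cells with 16 bins each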
|
d2b7eadda978896826800c7159a8e4604b150aa6
| 29,484 |
def get_graph(adj) -> nx.classes.graph.Graph:
"""
Returns a nx graph from zero-padded adjacency matrix.
    @param adj: adjacency matrix
[[0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[1. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[1. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 1. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 1. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 1. 0. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 1. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 1. 1. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0.]]
@return:
"""
# remove all zeros rows and columns
adj = adj[~np.all(adj == 0, axis=1)]
adj = adj[:, ~np.all(adj == 0, axis=0)]
adj = np.asmatrix(adj)
return nx.from_numpy_matrix(adj)
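
# Usage sketch (not part of the original snippet), assuming numpy (np) and
# networkx (nx, < 3.0 where from_numpy_matrix exists) are importable.
import numpy as np
import networkx as nx
padded = np.zeros((4, 4))
padded[0, 1] = padded[1, 0] = 1.0  # a single edge, zero-padded to 4x4
g = get_graph(padded)
print(g.number_of_nodes(), g.number_of_edges())  # 2 1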
|
a62f1c696111e7c4f97bb6fd858fc3bc9e011f6f
| 29,485 |
def findSamplesInRage(pointIds, minVal, maxVal):
"""根据样本编号的范围[1, 10]处理ID
Args:
pointIds (ndarray): 样本的ID
minVal (Number): 样本范围的起始位置
maxVal (Number): 样本范围的结束位置
Returns:
result (ndarray): 样本的ID
"""
digits = pointIds % 100
result = (digits >= minVal) & (digits <= maxVal)
return result
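
# Usage sketch (not part of the original snippet): keep only the IDs whose
# last two digits lie in [3, 7].
import numpy as np
ids = np.arange(101, 111)
mask = findSamplesInRage(ids, 3, 7)
print(ids[mask])  # [103 104 105 106 107]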
|
d2f040480c6513e9b845aaaa13485ecbe3376c41
| 29,489 |
def test_results_form_average(fill_market_trade_databases):
"""Tests averages are calculated correctly by ResultsForm, compared to a direct calculation
"""
Mediator.get_volatile_cache().clear_cache()
market_df, trade_df, order_df = get_sample_data()
trade_df, _ = MetricSlippage().calculate_metric(trade_order_df=trade_df, market_df=market_df, bid_benchmark='mid',
ask_benchmark='mid')
results_form = BarResultsForm(market_trade_order_list=['trade_df'],
metric_name='slippage',
aggregation_metric='mean',
aggregate_by_field=['ticker', 'venue'], scalar=10000.0,
weighting_field='executed_notional_in_reporting_currency')
results_df = results_form.aggregate_results(market_trade_order_df=trade_df, market_df=market_df,
market_trade_order_name='trade_df')
slippage_average = float(results_df[0][0].values[0])
# Directly calculate slippage
def grab_slippage(trade_df):
return 10000.0 * ((trade_df['slippage'] * trade_df['executed_notional_in_reporting_currency']).sum() \
/ trade_df['executed_notional_in_reporting_currency'].sum())
slippage_average_comp = grab_slippage(trade_df)
# Check the average slippage
    assert abs(slippage_average - slippage_average_comp) < eps
slippage_average_venue = results_df[1][0]['venue'][venue_filter]
slippage_average_venue_comp = grab_slippage(trade_df[trade_df['venue'] == venue_filter])
# Check the average slippage by venue
    assert abs(slippage_average_venue - slippage_average_venue_comp) < eps
|
92cb07fe56f026f0b1449e07e635db50581cffa9
| 29,490 |
from numpy.linalg import norm as mod  # Euclidean norm, used below to normalize direction vectors
def _prot_builder_from_seq(sequence):
"""
Build a protein from a template.
Adapted from fragbuilder
"""
names = []
bonds_mol = []
pept_coords, pept_at, bonds, _, _, offset = templates_aa[sequence[0]]
names.extend(pept_at)
bonds_mol.extend(bonds)
offsets = [0, offset]
for idx, aa in enumerate(sequence[1:]):
tmp_coords, tmp_at, bonds, _, _, offset = templates_aa[aa]
if sequence[0] == 'B' and idx == 0:
v3 = pept_coords[0 + offsets[idx]] # C
v2 = pept_coords[2 + offsets[idx]] # CH3
v1 = (pept_coords[5 + offsets[idx]] + pept_coords[3 + offsets[idx]]) / 2 # HH31 / HH33
#['C', 'O', 'CH3', 'HH31', 'HH32', 'HH33'],
else:
v3 = pept_coords[2 + offsets[idx]] # C
v2 = pept_coords[1 + offsets[idx]] # CA
v1 = pept_coords[0 + offsets[idx]] # N
connectionpoint = v3 + (v2 - v1) / mod(v2 - v1) * constants.peptide_bond_lenght
connectionvector = tmp_coords[0] - connectionpoint
# translate
tmp_coords = tmp_coords - connectionvector
# first rotation
v4 = v3 - v2 + connectionpoint
axis1 = perp_vector(tmp_coords[1], connectionpoint, v4)
angle1 = get_angle(tmp_coords[1], connectionpoint, v4)
center1 = connectionpoint
ba = axis1 - center1
tmp_coords = tmp_coords - center1
tmp_coords = tmp_coords @ rotation_matrix_3d(ba, angle1)
tmp_coords = tmp_coords + center1
axis2 = tmp_coords[1] - connectionpoint
axis2 = axis2 / mod(axis2) + connectionpoint
d3 = tmp_coords[1]
d4 = tmp_coords[2]
angle2 = constants.pi + get_torsional(v3, connectionpoint, d3, d4)
if aa == 'P':
angle2 -= - 90 * bmb.constants.degrees_to_radians
center2 = connectionpoint
ba = axis2 - center2
tmp_coords = tmp_coords - center2
tmp_coords = tmp_coords @ rotation_matrix_3d(ba, angle2)
tmp_coords = tmp_coords + center2
names.extend(tmp_at)
offsets.append(offsets[idx + 1] + offset)
pept_coords = np.concatenate([pept_coords, tmp_coords])
# create a list of bonds from the template-bonds by adding the offset
prev_offset = offsets[-3]
last_offset = offsets[-2]
bonds_mol.extend([(i + last_offset, j + last_offset)
for i, j in bonds] + [(2 + prev_offset, last_offset)])
offsets.append(offsets[-1] + offset)
exclusions = _exclusions_1_3(bonds_mol)
# generate a list with the names of chemical elements
elements = []
for i in names:
element = i[0]
if element in ['1', '2', '3']:
element = i[1]
elements.append(element)
occupancies = [1.] * len(names)
bfactors = [0.] * len(names)
return (pept_coords,
names,
elements,
occupancies,
bfactors,
offsets,
exclusions)
|
da182a0dd323db2e3930a72c0080499ed643be1a
| 29,491 |
def connect_thread():
"""
Starts a SlaveService on a thread and connects to it. Useful for testing
purposes. See :func:`rpyc.utils.factory.connect_thread`
:returns: an RPyC connection exposing ``SlaveService``
"""
return factory.connect_thread(SlaveService, remote_service = SlaveService)
|
557dfbd7a5389345f7becdc550f4140d74cf6695
| 29,492 |
from typing import Tuple
def yxyx_to_albu(yxyx: np.ndarray,
img_size: Tuple[PosInt, PosInt]) -> np.ndarray:
"""Unnormalized [ymin, xmin, ymax, xmax] to Albumentations format i.e.
normalized [ymin, xmin, ymax, xmax].
"""
h, w = img_size
ymin, xmin, ymax, xmax = yxyx.T
ymin, ymax = ymin / h, ymax / h
xmin, xmax = xmin / w, xmax / w
xmin = np.clip(xmin, 0., 1., out=xmin)
xmax = np.clip(xmax, 0., 1., out=xmax)
ymin = np.clip(ymin, 0., 1., out=ymin)
ymax = np.clip(ymax, 0., 1., out=ymax)
xyxy = np.stack([xmin, ymin, xmax, ymax], axis=1).reshape((-1, 4))
return xyxy
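
# Usage sketch (not part of the original snippet): one unnormalized yxyx box
# on a 100x200 (h x w) image becomes a normalized xyxy box. Assumes numpy is
# imported as np, as the function body expects.
import numpy as np
boxes = np.array([[10., 20., 50., 120.]])  # ymin, xmin, ymax, xmax
print(yxyx_to_albu(boxes, (100, 200)))     # [[0.1 0.1 0.6 0.5]]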
|
d6429ca3c694e5f2fd69dba645e3d97cab4720f8
| 29,493 |
def parse_tags(source):
"""
    Extract tags from source: bare words become individual tags and any
    substring enclosed in parentheses becomes a grouped list of tags.
    source should be a string.
    Normally something like JSON would be used for this, but this makes it
    easy to specify these tags and their groups manually (via text box or
    command line argument).
    http://stackoverflow.com/questions/1651487/python-parsing-bracketed-blocks
"""
unmatched_count = 0
start_pos = 0
opened = False
open_pos = 0
cur_pos = 0
finished = []
segments = []
for character in source:
#scan for mismatched parenthesis:
if character == '(':
unmatched_count += 1
if not opened:
open_pos = cur_pos
opened = True
if character == ')':
unmatched_count -= 1
if opened and unmatched_count == 0:
clean = source[start_pos:open_pos]
clean = clean.strip()
if clean:
finished.extend(clean.split())
segment = source[open_pos:cur_pos+1]
#segments.append(segment)
#get rid of bounding parentheses:
pruned = segment[1:-1]
group = pruned.split()
finished.append(group)
opened = False
start_pos = cur_pos+1
cur_pos += 1
assert unmatched_count == 0
if start_pos != cur_pos:
#get anything that was left over here
remainder = source[start_pos:cur_pos].strip()
finished.extend(remainder.split())
## #now check on recursion:
## for item in segments:
## #get rid of bounding parentheses:
## pruned = item[1:-1]
## if recurse:
## results = parse_tags(pruned, recurse)
## finished.expand(results)
## else:
## finished.append(pruned.strip())
return finished
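
# Usage sketch (not part of the original snippet): bare words become
# individual tags, each parenthesized run becomes a grouped list.
print(parse_tags("red blue (small cap) green"))
# ['red', 'blue', ['small', 'cap'], 'green']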
|
315ea121cec56a38edc16bfa9e6a7ccaeeab1dc2
| 29,494 |
def NonZeroMin(data):
"""Returns the smallest non-zero value in an array.
Parameters
----------
data : array-like
A list, tuple or array of numbers.
Returns
-------
An integer or real value, depending on data's dtype.
"""
# 1) Convert lists and tuples into arrays
    if not isinstance(data, np.ndarray):
data = np.array(data)
# 2) Find the minimal non-zero value and return.
idx = np.where(data)
return data[idx].min()
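
# Usage sketch (not part of the original snippet), assuming numpy is imported
# as np at module level as the function body expects.
import numpy as np
print(NonZeroMin([0, 3, 0, 1, 7]))   # 1
print(NonZeroMin([0.0, 2.5, 0.5]))   # 0.5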
|
89d466c9d739dc511cd37fd71283ae8c6b2cc388
| 29,495 |
def get_99_pct_params_ln(x1: float, x2: float):
"""Wrapper assuming you want the 0.5%-99.5% inter-quantile range.
:param x1: the lower value such that pr(X > x1) = 0.005
:param x2: the higher value such that pr(X < x2) = 0.995
"""
return get_lognormal_params_from_qs(x1, x2, 0.005, 0.995)
|
2ce424a289ea8a5af087ca5120b3d8763d1e2f31
| 29,496 |
def summarize_data(data):
"""
"""
#subset desired columns
data = data[['scenario', 'strategy', 'confidence', 'decile', 'cost_user']]
#get the median value
data = data.groupby(['scenario', 'strategy', 'confidence', 'decile'])['cost_user'].median().reset_index()
data.columns = ['Scenario', 'Strategy', 'Confidence', 'Decile', 'Cost Per User ($)']
return data
|
9964d99ed70a1405f1c94553172fd6830371472a
| 29,497 |
def gen_cam(image, mask):
"""
    Generate a CAM (class activation map) visualization.
    :param image: [H,W,C], the original image
    :param mask: [H,W], values in the range 0~1
    :return: tuple(cam, heatmap)
    """
    # convert the mask to a heatmap
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
# heatmap = np.float32(heatmap) / 255
    heatmap = heatmap[..., ::-1]  # bgr to rgb
    # blend the heatmap with the original image
cam = np.float32(heatmap) + np.float32(image)
return norm_image(cam), heatmap.astype(np.uint8)
|
a9d221b6d536aef6c2e2093bb20614cf682de704
| 29,498 |
def get_username(strategy, details, backend, user=None, *args, **kwargs):
"""Resolve valid username for use in new account"""
if user:
return None
settings = strategy.request.settings
username = perpare_username(details.get("username", ""))
full_name = perpare_username(details.get("full_name", ""))
first_name = perpare_username(details.get("first_name", ""))
last_name = perpare_username(details.get("last_name", ""))
    names_to_try = []
if username:
names_to_try.append(username)
if first_name:
names_to_try.append(first_name)
if last_name:
# if first name is taken, try first name + first char of last name
names_to_try.append(first_name + last_name[0])
if full_name:
names_to_try.append(full_name)
username_length_max = settings.username_length_max
for name in names_to_try:
if len(name) > username_length_max:
names_to_try.append(name[:username_length_max])
for name in filter(bool, names_to_try):
try:
validate_username(settings, name)
return {"clean_username": name}
except ValidationError:
pass
|
728a0aadf9aa58369fcf791d8cccb0d9214a4583
| 29,501 |
def not_at_max_message_length():
""" Indicates if we have room left in message """
global message
return message.count(SPACE) < WORD_LIMIT
|
0bd7d758c80ed272de0571b4f651fe6a24c39b58
| 29,502 |
import math
def _fill_arc_trigonometry_array():
"""
Utility function to fill the trigonometry array used by some arc* functions (arcsin, arccos, ...)
Returns
-------
The array filled with useful angle measures
"""
arc_trig_array = [
-1,
math.pi / 4, # -45°
math.pi / 6, # -30°
0, # 0°
math.pi / 6, # 30°
math.pi / 4, # 45°
1
]
return arc_trig_array
|
6b5c39dbacf028d84a397e2911f9c9b7241fe0f4
| 29,506 |
from typing import Optional
def get_default_tag_to_block_ctor(
tag_name: str
) -> Optional[CurvatureBlockCtor]:
"""Returns the default curvature block constructor for the give tag name."""
global _DEFAULT_TAG_TO_BLOCK_CTOR
return _DEFAULT_TAG_TO_BLOCK_CTOR.get(tag_name)
|
33d002ef206aa13c963b951325a92f49c86eb202
| 29,507 |
def panel_list_tarefas(context, tarefas, comp=True, aluno=True):
"""Renderiza uma lista de tarefas apartir de um Lista de tarefas"""
tarefas_c = []
for tarefa in tarefas:
tarefas_c.append((tarefa, None))
context.update({'tarefas': tarefas_c, 'comp': comp})
return context
|
3de659af41a6d7550104321640526f1970fd415c
| 29,508 |
def remove_stopwords(label):
"""
Remove stopwords from a single label.
"""
tokenized = label.split()
# Keep removing stopwords until a word doesn't match.
for i,word in enumerate(tokenized):
if word not in STOPWORDS:# and len(word) > 1:
return ' '.join(tokenized[i:])
# For the empty string.
return ''
|
f15e50e5e11ecc6a0abca6b68219789e72070a69
| 29,510 |
def single_device_training_net(data_tensors, train_net_func):
""" generate training nets for multiple devices
:param data_tensors: [ [batch_size, ...], [batch_size, ...], ... ]
:param train_net_func: loss, display_outputs, first_device_output = train_net_func(data_tensors, default_reuse)
Remark: loss can be a list or a dict, then the return of this function can be organized accordingly
:return output_entry: a class instance with fields for a single device
:return unique_variable_list: variable_list on the first/only device
"""
old_variable_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
output_struct, new_variable_list = _single_device_training_net(data_tensors, train_net_func)
unique_variable_list = list(set(new_variable_list) - set(old_variable_list))
return output_struct, unique_variable_list
|
d94e7b552f3612d2f0fd16973612adea77de0bd0
| 29,511 |
def clopper_pearson(k,n,alpha):
"""Confidence intervals for a binomial distribution of k expected successes on n trials:
http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
Parameters
----------
k : array_like
number of successes
n : array_like
number of trials
alpha : float
confidence level
Returns
-------
    lo, hi : array_like
        lower and upper bounds on the success probability
"""
lo = beta.ppf(alpha/2, k, n-k+1)
lo[onp.isnan(lo)] = 0 # hack to remove NaNs where we only have 0 samples
hi = beta.ppf(1 - alpha/2, k+1, n-k)
hi[onp.isnan(hi)] = 1 # hack to remove NaNs where the marginal is 1
return lo, hi
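
# Usage sketch (not part of the original snippet), assuming the module-level
# imports the body relies on: scipy.stats.beta and numpy imported as onp.
from scipy.stats import beta
import numpy as onp
lo, hi = clopper_pearson(onp.array([0, 5, 10]), onp.array([10, 10, 10]), 0.05)
print(lo)  # lower 95% bounds on the proportion; 0 where k == 0
print(hi)  # upper 95% bounds on the proportion; 1 where k == n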
|
c10db17a4fb75cc0d7304aceacde9cf716a5cb77
| 29,515 |
def add_link(news_list:list) -> list:
"""
Description:
        Function to remove the read-more marker and add the url as a marked-up link for the 'content' key in the 'articles' dictionary.
Arguments:
news_list {list} : list containing the news articles dictionaries
Returns:
news_list {list} : list containing the news articles dictionaries
"""
x = 0
for i in news_list:
        # retrieves the content to be changed
content = news_list[x]['content']
# retrieves the url to be added to content
url = news_list[x]['url']
size = len(content)
# removes [readmore] and adds the link to the webpage in markup
content = content[:size-13].replace('[', '') + Markup(f"<a href='{url}'>Read More</a>")
# adds the new content
news_list[x]['content'] = content
x = x + 1
return news_list
|
f1339ddb8854800ae241b7cbb0badc6654c30696
| 29,516 |
from numpy import array, sign
def taitnumber(c):
""" Return Tait number from signed edge list of Tait graph. """
c = array(c) # If type(c) != ndarray
tau = sum(sign(c[:, 0]))
return tau
|
b30e3d294ae4af80e5d09b7c5de0a6a0682d6d27
| 29,517 |
def _difference_map(image, color_axis):
"""Difference map of the image.
Approximate derivatives of the function image[c, :, :]
(e.g. PyTorch) or image[:, :, c] (e.g. Keras).
dfdx, dfdy = difference_map(image)
In:
image: numpy.ndarray
of shape C x h x w or h x w x C, with C = 1 or C = 3
(color channels), h, w >= 3, and [type] is 'Float' or
'Double'. Contains the values of functions f_b:
R ^ 2 -> R ^ C, b = 1, ..., B, on the grid
{0, ..., h - 1} x {0, ..., w - 1}.
Out:
dfdx: numpy.ndarray
dfdy: numpy.ndarray
of shape C x h x w or h x w x C contain the x and
y derivatives of f at the points on the grid,
approximated by central differences (except on
boundaries):
For c = 0, ... , C, i = 1, ..., h - 2,
j = 1, ..., w - 2.
e.g. for shape = c x h x w:
dfdx[c, i, j] = (image[c, i, j + 1] -
image[c, i, j - 1]) / 2
            dfdy[c, i, j] = (image[c, i + 1, j] -
                             image[c, i - 1, j]) / 2
positive x-direction is along rows from left to right.
positive y-direction is along columns from above to below.
"""
if color_axis == 2:
image = _transpose_image(image)
# Derivative in x direction (rows from left to right)
dfdx = np.zeros_like(image)
# forward difference in first column
dfdx[:, :, 0] = image[:, :, 1] - image[:, :, 0]
# backwards difference in last column
dfdx[:, :, -1] = image[:, :, -1] - image[:, :, -2]
# central difference elsewhere
dfdx[:, :, 1:-1] = 0.5 * (image[:, :, 2:] - image[:, :, :-2])
# Derivative in y direction (columns from above to below)
dfdy = np.zeros_like(image)
# forward difference in first row
dfdy[:, 0, :] = image[:, 1, :] - image[:, 0, :]
# backwards difference in last row
dfdy[:, -1, :] = image[:, -1, :] - image[:, -2, :]
# central difference elsewhere
dfdy[:, 1:-1, :] = 0.5 * (image[:, 2:, :] - image[:, :-2, :])
return dfdx, dfdy
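
# Usage sketch (not part of the original snippet): on a linear ramp along x
# the x-derivative is 1 everywhere and the y-derivative is 0. Assumes numpy
# is imported as np, as the function body expects.
import numpy as np
img = np.tile(np.arange(5, dtype=np.float64), (1, 4, 1))  # C x h x w = 1 x 4 x 5
dfdx, dfdy = _difference_map(img, color_axis=0)
print(np.allclose(dfdx, 1.0), np.allclose(dfdy, 0.0))  # True True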
|
deff16dbe73005d52444babf05857c2cfea25e0b
| 29,518 |
import math
def force_grid(force_parameters, position_points, velocity_min, velocity_max):
"""Calculates the force on a grid of points in phase space."""
velocity_min_index = velocity_index(velocity_min)
velocity_max_index = velocity_index(velocity_max)
spacing = 2*math.pi / position_points
force = np.zeros((velocity_max_index - velocity_min_index, position_points + 1))
for vel_index in range(velocity_min_index, velocity_max_index):
for position_index in range(position_points + 1):
position = spacing * position_index
features = fourier_basis(position)
force[vel_index - velocity_min_index][position_index] = (
force_parameters[vel_index] @ features)
return force
|
d8d74604c8e313904f97e97364778b0db8db801c
| 29,519 |
def valueFromMapping(procurement, subcontract, grant, subgrant, mapping):
"""We configure mappings between FSRS field names and our needs above.
This function uses that config to derive a value from the provided
grant/subgrant"""
subaward = subcontract or subgrant
if mapping is None:
return ''
elif isinstance(mapping, str):
return getattr(subaward, mapping)
elif isinstance(mapping, tuple) and subcontract:
return valueFromMapping(procurement, subcontract, grant, subgrant,
mapping[0])
elif isinstance(mapping, tuple) and subgrant:
return valueFromMapping(procurement, subcontract, grant, subgrant,
mapping[1])
else:
raise ValueError("Unknown mapping type: {}".format(mapping))
|
1bf2dda830183d1c8289e957b83b1c0d01619160
| 29,521 |
def get_cards_in_hand_values_list(player):
"""Gets all the cards in a players's hand and return as a values list"""
return list(Card.objects.filter(cardgameplayer__player=player, cardgameplayer__status=CardGamePlayer.HAND).values('pk', 'name', 'text'))
|
474ac071950857783dfd76b50ae08483a03fc8bc
| 29,522 |
def get_dGdE(fp, tau_E, a_E, theta_E, wEE, wEI, I_ext_E, **other_pars):
"""
Compute dGdE
Args:
fp : fixed point (E, I), array
Other arguments are parameters of the Wilson-Cowan model
Returns:
J : the 2x2 Jacobian matrix
"""
rE, rI = fp
# Calculate the J[0,0]
dGdrE = (-1 + wEE * dF(wEE * rE - wEI * rI + I_ext_E, a_E, theta_E)) / tau_E
return dGdrE
|
9a53cc9b0cadea8f8884b64d687a2397c0a973a7
| 29,523 |
def convert_data_to_ints(data, vocab2int, word_count, unk_count, eos=True):
"""
Converts the words in the data into their corresponding integer values.
Input:
data: a list of texts in the corpus
        vocab2int: word-to-integer conversion dictionary
word_count: an integer to count the words in the dataset
unk_count: an integer to count the <UNK> tokens in the dataset
eos: boolean whether to append <EOS> token at the end or not (default true)
Returns:
converted_data: a list of corpus texts converted to integers
word_count: updated word count
unk_count: updated unk_count
"""
converted_data = []
for text in data:
converted_text = []
for token in text.split():
word_count += 1
if token in vocab2int:
# Convert each token in the paragraph to int and append it
converted_text.append(vocab2int[token])
else:
# If it's not in the dictionary, use the int for <UNK> token instead
converted_text.append(vocab2int['<UNK>'])
unk_count += 1
if eos:
# Append <EOS> token if specified
converted_text.append(vocab2int['<EOS>'])
converted_data.append(converted_text)
assert len(converted_data) == len(data)
return converted_data, word_count, unk_count
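
# Usage sketch (not part of the original snippet) with a tiny hypothetical
# vocabulary; unknown tokens map to <UNK> and <EOS> is appended.
vocab2int = {'hello': 0, 'world': 1, '<UNK>': 2, '<EOS>': 3}
converted, wc, unk = convert_data_to_ints(['hello world', 'hello there'],
                                          vocab2int, word_count=0, unk_count=0)
print(converted)  # [[0, 1, 3], [0, 2, 3]]
print(wc, unk)    # 4 1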
|
c415aea164f99bc2a44d5098b6dbcc3d723697a6
| 29,524 |
def apply_objective_fn(state, obj_fn, precision, scalar_factor=None):
"""Applies a local ObjectiveFn to a state.
This function should only be called inside a pmap, on a pmapped state.
`obj_fn` will usually be a the return value of `operators.gather_local_terms`.
See the docstrings of `SevenDiscretedOperator` and `operators.gather_local_terms`
for more information.
Args:
    state: The probability function.
obj_fn: The ObjectiveFn, given as a sequence of `SevenDiscretedOperator`s, that
represent local terms collected together.
precision: Jax matrix multiplication precision.
    scalar_factor: Optional; If `None`, return obj_fn|state>, otherwise return
      (1 + scalar_factor * obj_fn)|state>. `None` by default.
Returns:
Either (1 + scalar_factor * obj_fn)|state> or obj_fn|state>, depending on
`scalar_factor`.
"""
orig_shape = state.shape
_, n_local_discretes = number_of_discretes(state)
if scalar_factor is not None:
result = state
else:
result = jnp.zeros_like(state)
i = 0
for n_term, term in enumerate(obj_fn):
position_to_apply = i - term.left_pad
if scalar_factor is not None:
array = scalar_factor * term.array
else:
array = term.array
result = result + _apply_building_block(
state,
array,
position_to_apply,
n_local_discretes,
precision,
).reshape(result.shape)
i += term.width
if n_term < len(obj_fn) - 1:
state, result, i = _apply_permutations(
state,
result,
i=i,
permutations=term.permutations_after,
)
else:
# For the last term, avoid doing an unnecessary permutation on the
# original state that is no longer needed.
del state
result, i = _apply_permutations(
result,
i=i,
permutations=term.permutations_after,
)
return result.reshape(orig_shape)
|
c4fa72f84ce241aa765416fe21ff5426758d5303
| 29,525 |
import requests
def oauth_generate_token(
consumer_key, consumer_secret, grant_type="client_credentials",
env="sandbox"):
"""
Authenticate your app and return an OAuth access token.
This token gives you time bound access token to call allowed APIs.
NOTE: The OAuth access token expires after an hour (3600 seconds),
after which, you will need to generate another access token so you
need to keep track of this.
:param consumer_key:
:param consumer_secret:
:param grant_type:
:param env:
:return response:
"""
url = urls.get_generate_token_url(env)
try:
req = requests.get(
url, params=dict(grant_type=grant_type),
auth=(consumer_key, consumer_secret))
except Exception as e:
logger.exception("Error in {} request. {}".format(url, str(e)))
return None, None
else:
return req.json(), req.status_code
|
7ab44b7ba1eb569d0b498946e2936928612e3fa7
| 29,526 |
from typing import List
import shlex
def parse_quoted_string(string: str, preserve_quotes: bool) -> List[str]:
"""
Parse a quoted string into a list of arguments
:param string: the string being parsed
:param preserve_quotes: if True, then quotes will not be stripped
"""
if isinstance(string, list):
# arguments are already a list, return the list we were passed
lexed_arglist = string
else:
# Use shlex to split the command line into a list of arguments based on shell rules
lexed_arglist = shlex.split(string, posix=False)
if not preserve_quotes:
lexed_arglist = [utils.strip_quotes(arg) for arg in lexed_arglist]
return lexed_arglist
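
# Usage sketch (not part of the original snippet): with preserve_quotes=True
# shlex keeps the quoted argument intact (utils.strip_quotes is only needed
# on the quote-stripping path).
print(parse_quoted_string('say "hello world" now', preserve_quotes=True))
# ['say', '"hello world"', 'now']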
|
6715778f5190445e74b8705542cbfdb1fe022ecc
| 29,527 |
def scalar(name, scalar_value):
""" 转换标量数据到potobuf格式 """
scalar = make_np(scalar_value)
assert (scalar.squeeze().ndim == 0), 'scalar should be 0D'
scalar = float(scalar)
metadata = SummaryMetadata(plugin_data=SummaryMetadata.PluginData(plugin_name='scalars'))
return Summary(value=[Summary.Value(tag=name,
simple_value=scalar,
metadata=metadata)])
|
e31046a00dc0e2ae6c33bd041b34652c08d2a439
| 29,528 |
def filter_citations_by_type(list_of_citations, violation_description):
"""Gets a list of the citations for a particular violation_description.
"""
citations = []
for citation in list_of_citations:
filtered_citation = check_citation_type(citation, violation_description)
if filtered_citation:
citations.append(filtered_citation)
return citations
|
398d7cbe43761070c8b5b9117478f6fe5c985a2c
| 29,529 |
def user_import_circular_database(injector, session, user_mock_circular_database) -> UserMockDataSource:
"""Return the circular data source and import its schema
to the user's project."""
facade = injector.get(DataSourceFacade)
facade.import_schema(user_mock_circular_database.data_source)
session.commit()
return user_mock_circular_database
|
492b87c7cf8d5ef8306fb827620cba860677f5be
| 29,530 |
import torch
def loss_fn(model, data, marginal_prob_std, eps=1e-5):
"""The loss function for training score-based generative models.
Args:
model: A PyTorch model instance that represents a
time-dependent score-based model.
    data: A mini-batch of training data.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
eps: A tolerance value for numerical stability.
"""
node2graph = data.batch
edge2graph = node2graph[data.edge_index[0]]
graph_num = len(data.smiles)
d = data.edge_length # (num_edge,1)
random_t = torch.rand(graph_num, device=d.device) * (1. - eps) + eps # (batch_size)
z = torch.randn_like(d) # (num_edge,1)
std = marginal_prob_std(random_t)[edge2graph] # (num_edge)
    perturbed_d = d + z * std[:, None]  # std[:, None] has shape (num_edge, 1); perturbed_d.size() = (num_edge, 1)
data.edge_length = perturbed_d
score = model(data, random_t)
loss = torch.mean((score[:, None] * std[:, None] + z) ** 2)
return loss
|
f42dd43d1de865ec31c7702e747852a6df04e479
| 29,531 |
from functools import reduce
from operator import mul
def nCk(n, k):
"""
    Number of combinations: n choose k
"""
if n < 0: raise ValueError("Invalid value for n: %s" % n)
if k < 0 or k > n: return 0
if k in (0, n): return 1
if k in (1, n-1): return n
low_min = 1
low_max = min(n, k)
high_min = max(1, n - k + 1)
high_max = n
return reduce(mul, range(high_min, high_max + 1), 1) // reduce(mul, range(low_min, low_max + 1), 1)
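
# Usage sketch (not part of the original snippet): a few sanity checks.
print(nCk(5, 2))   # 10
print(nCk(10, 0))  # 1
print(nCk(4, 5))   # 0, since k > n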
|
9d84ba8fad27860f64980fb4165f72f0a7ec944c
| 29,532 |
def knowledge_extract_from_json():
"""
    Second step of knowledge extraction from semi-structured data:
    mapping between JSON and database tables
Returns:
"""
data = request.json
result = extract_data_from_json(data)
return jsonify({"data": result})
|
361d38891a8d90d30a75e3041e082e9c60395666
| 29,533 |
import random
def vote_random_ideas(request, owner, repository, full_repository_name):
"""
Get 2 random ideas
"""
database_repository = get_object_or_404(models.Repository, owner=owner, name=repository)
jb = jucybot.from_config()
context = {}
context = jb.get_issues(full_repository_name, context=context, issues_to_get=['ready'])
# If there are not enough issues ready, also get the new issues
if len(context['issues']) < 4:
context = jb.get_issues(full_repository_name, context=context, issues_to_get=['new'])
issues = get_issues_subscribers(request, database_repository, context['issues'])
# Remove issues I already voted for
issues = [issue for issue in issues if not issue['subscribed']]
# If there are less than 2 issues, return null
try:
issues = random.sample(issues, 2)
except ValueError:
issues = None
return JsonResponse({'issues': [{
'title': issue['title'],
'body': issue['body'],
'number': issue['number'],
'total_subscribers': issue['total_subscribers']
} for issue in issues] if issues else None})
|
dba4d24711e49f68e85ef6b9f5d3fb4428cb6351
| 29,534 |
def delta_EF_asym(ave,t_e,t_mu,comp,t_f,n,alpha = None,max_ave_H = 1):
"""computes the EF with asymptotic f, f(N) = f_i*H_i*N_i/(N_i+H_i)
For more information see S10
H_i is uniformly distributed in [0,2*ave_H]
Input
ave, t_e, t_mu, t_f, comp,n:
As in output of rand_par
    alpha: optional
        Not strictly needed; accepted so that delta_EF_asym can be called
        with the same arguments as related delta_EF functions
returns:
deltaEF/EF: array
Array containing 100*deltaEF/EF"""
num = len(ave) #number of communities
# choose distribution of H: H ~u[0,2*ave]
ave_H = uni(0,max_ave_H,num)
t_H = uni(-1/sqrt, 1/sqrt,num) #stdv/mean of H
H = lambda x: ave_H*(1+t_H*sqrt*x) #H_i for each species in a community
#asymptotic EF in N, EF(N) = f_i*H_i*N_i/(N_i+H_i)
#change to consider different contribution to function
eco_fun = lambda x, N: n*(1+t_f*x*sqrt)*H(x)*N(x)/(N(x)+H(x))
# computes the equilibrium densities of species N, in changed and ref site
N_ref = lambda x: (1+t_mu*sqrt*x-comp)/(1+alpha)
N_change = lambda x: ((1+x*t_mu*sqrt)*(1-ave*(1+t_e*sqrt*x))-\
comp*(1-ave*(1+t_mu*t_e)))/(1+alpha)
# integrate over all species for EF
x_simp = np.array(num*[np.linspace(-1,1,51)]) #x_axes
y_ref = eco_fun(x_simp.T, N_ref).T #y_values in ref
y_change = eco_fun(x_simp.T, N_change).T #y values in changed site
EF_ref = simps(y_ref,x_simp)
EF_change = simps(y_change,x_simp)
return 100*(EF_change-EF_ref)/EF_ref
|
82abe7a6473b9a0b432654837fb8bffff86513e8
| 29,535 |
from scipy.signal.spectral import _median_bias
from scipy.signal.windows import get_window
def time_average_psd(data, nfft, window, average="median", sampling_frequency=1):
"""
Estimate a power spectral density (PSD) by averaging over non-overlapping
shorter segments.
This is different from many other implementations as it does not account
for the window power loss factor (<window ** 2>)
Parameters
----------
data: np.ndarray
The input data to use to estimate the PSD
nfft: int
The number of input elements per segment
window: [str, tuple]
Input arguments for scipy.signal.windows.get_window to specify the
window.
average: str
Time averaging method, should be either "mean" or "median"
sampling_frequency: float
The sampling frequency of the input data, used to normalize the PSD
estimate to have dimensions of 1 / Hz.
Returns
-------
psd: np.ndarray
The estimate PSD
"""
if not isinstance(window, np.ndarray):
window = get_window(window, nfft)
blocked_data = data.reshape(-1, nfft) * window
blocked_psd = abs(np.fft.rfft(blocked_data, axis=-1) / sampling_frequency) ** 2
if average == "median":
normalization = 1 / _median_bias(len(blocked_data))
func = np.median
elif average == "mean":
normalization = 1
func = np.mean
else:
raise ValueError(f"PSD method should be mean or median, not {average}")
psd = func(blocked_psd, axis=0) / 2 * normalization
return psd
|
98fb788fdec7f2a868cc576f209c37e196880edf
| 29,536 |
import torch
def Variable(tensor, *args, **kwargs):
"""
The augmented Variable() function which automatically applies cuda() when gpu is available.
"""
if use_cuda:
return torch.autograd.Variable(tensor, *args, **kwargs).cuda()
else:
return torch.autograd.Variable(tensor, *args, **kwargs)
|
b8b0534efd0fd40966eaa70e78e6a8db41156cd4
| 29,537 |
def edit_distance(graph1, graph2, node_attr='h', edge_attr='e', upper_bound=100, indel_mul=3, sub_mul=3):
"""
Calculates exact graph edit distance between 2 graphs.
Args:
graph1 : networkx graph, graph with node and edge attributes
graph2 : networkx graph, graph with node and edge attributes
node_attr : str, key for node attribute
edge_attr : str, key for edge attribute
upper_bound : int, maximum edit distance to consider
indel_mul: float, insertion/deletion cost
sub_mul: float, substitution cost
Returns:
np.float, distance, how similar graph1 is to graph2
"""
def node_substitution_scoring(dict_1, dict_2):
"""Calculates node substitution score."""
multiplier = sub_mul if distance.rogerstanimoto(
dict_1[node_attr], dict_2[node_attr]) != 0 else 0
return multiplier*(1 - distance.rogerstanimoto(
dict_1[node_attr], dict_2[node_attr]))
def edge_substitution_scoring(dict_1, dict_2):
"""Calculates edge substitution score."""
multiplier = sub_mul if distance.rogerstanimoto(
dict_1[edge_attr], dict_2[edge_attr]) != 0 else 0
return multiplier*(1 - distance.rogerstanimoto(
dict_1[edge_attr], dict_2[edge_attr]))
def constant_value(dict_1):
"""Returns constant score for insertion/deletion."""
return indel_mul
graph1 = feature_conversion(graph1, node_attr, edge_attr)
graph2 = feature_conversion(graph2, node_attr, edge_attr)
return min(
nx.optimize_graph_edit_distance(
graph1, graph2,
node_subst_cost = node_substitution_scoring,
edge_subst_cost = edge_substitution_scoring,
upper_bound = upper_bound,
node_del_cost = constant_value,
node_ins_cost = constant_value,
edge_del_cost = constant_value,
edge_ins_cost = constant_value,
))
|
550f44e91e60a7c3308d5187af3d32054cf6dffa
| 29,539 |
def get_node_elements(coord,scale,alpha,dof,bcPrescr=None,bc=None,bc_color='red',fPrescr=None,f=None,f_color='blue6',dofs_per_node=None):
"""
Routine to get node node actors.
:param array coord: Nodal coordinates [number of nodes x 3]
:param int scale: Node actor radius
:param float alpha: Node actor transparency [0-1]
:param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
:param array bcPrescr: Degrees of freedom with prescribed boundary conditions [number of prescribed boundary contidions x 1]
:param array bc: Values for prescribed boundary conditions [number of prescribed boundary contidions x 1]
:param string bc_color: Color for nodes with prescribed boundary conditions
:param array fPrescr: Degrees of freedom with applied forces [number of applied forces x 1]
:param array f: Values for forces [number of applied forces x 1]
:param string f_color: Color for nodes with applied forces
:param int dofs_per_node: Degrees of freedom per node [1-6]
:return list nodes: Node actors
"""
nnode = np.size(coord, axis = 0)
ncoord = np.size(coord, axis = 1)
nodes = []
bc_dict = {}
indx = 0
if isinstance(bcPrescr, np.ndarray):
for i in bcPrescr:
bc_dict[i] = bc[indx]
indx += 1
f_dict = {}
indx = 0
if isinstance(fPrescr, np.ndarray):
for i in fPrescr:
f_dict[i] = f[indx]
indx += 1
for i in range(nnode):
dofs = dof[i]
if np.any(np.isin(bcPrescr, dofs, assume_unique=True)) == True:
color = bc_color
elif np.any(np.isin(fPrescr, dofs, assume_unique=True)) == True:
color = f_color
else:
color = 'black'
node = Sphere(c=color).scale(1.5*scale).pos([coord[i,0],coord[i,1],coord[i,2]]).alpha(alpha)
if np.any(np.isin(bcPrescr, dofs, assume_unique=True)) == True:
node.name = f"Node nr. {i+1}, DoFs & BCs: ["
for j in range(dofs_per_node):
node.name += str(dof[i,j])
if dof[i,j] in bc_dict:
node.name += (': ' + str(bc_dict[dof[i,j]]))
if j == dofs_per_node-1:
node.name += ']'
else:
node.name += ', '
elif np.any(np.isin(fPrescr, dofs, assume_unique=True)) == True:
node.name = f"Node nr. {i+1}, DoFs & Forces: ["
for j in range(dofs_per_node):
node.name += str(dof[i,j])
if dof[i,j] in f_dict:
node.name += (': ' + str(f_dict[dof[i,j]]))
if j == dofs_per_node-1:
node.name += ']'
else:
node.name += ', '
else:
node.name = f"Node nr. {i+1}, DoFs: ["
for j in range(dofs_per_node):
node.name += str(dof[i,j])
if j == dofs_per_node-1:
node.name += ']'
else:
node.name += ', '
nodes.append(node)
return nodes
|
f6e9c2eec12c1816331651d821fa907e5ce34d42
| 29,540 |
import logging
import time
import pickle
def temporal_testing(
horizon, model, observ_interval, first_stage,
bm_threshold, ratio, bootstrap, epsilon, solve
):
"""
    First stage uses a random forest with cross validation;
    no best-model selection is performed,
    and there is no separate test split
"""
model_name = "horizon-%s-ratio-%0.2f" % (horizon, ratio)
# ====================== load data ========================
observ_horizon = (horizon - 1) * 60
interval = 5 if first_stage == "RF" else 12
ML_data = pd.read_csv(
'data/{}-{}.csv'.format(first_stage, horizon),
index_col=False
)
sepsis_stream = ML_data.loc[ML_data['label'] == 1]
sepsis_stream = sepsis_stream.reset_index(drop=True)
sepsis_stream = sepsis_stream.drop(
['patientunitstayid', 'label'], axis=1
)
nonsep_stream = ML_data.loc[ML_data['label'] == 0]
nonsep_stream = nonsep_stream.reset_index(drop=True)
nonsep_stream = nonsep_stream.drop(
['patientunitstayid', 'label'], axis=1
)
# ===================== discretize data =========================
sepsis_discr = discretize_data(
stream_data=dcopy(sepsis_stream), levels=dcopy(model.observations)
)
nonsep_discr = discretize_data(
stream_data=dcopy(nonsep_stream), levels=dcopy(model.observations)
)
# =========================== Bootstrapping ===============================
# metrics
sensitivity, specificity, precision, f_1, ave_time = {}, {}, {}, {}, {}
bm_sensitivity, bm_specificity, bm_precision = {}, {}, {}
bm_f_1, bm_ave_time = {}, {}
# update trans_function according to observation_interval
def trans_func(new_state, old_state, action):
"""transition function"""
p = 0.99967 ** (observ_interval * interval)
if old_state == "sepsis":
if new_state == "sepsis":
return 1.0
if new_state == "nonsep":
return 0.0
if old_state == "nonsep":
if new_state == "sepsis":
return 1 - p
if new_state == "nonsep":
return p
return 0
model.trans_func = trans_func
# start bootstrap
for boot in range(bootstrap):
logging.info("Bootstrap: {}\n".format(boot))
# -------------- sample data ---------------
# index
sepsis_tr_ind = np.random.choice(
range(sepsis_discr.shape[0]), 500, False
)
nonsep_tr_ind = np.random.choice(
range(nonsep_discr.shape[0]), 500, False
)
# data
sepsis_data, nonsep_data = {}, {}
# train data
sepsis_data['train'] = sepsis_discr.iloc[sepsis_tr_ind, :]
nonsep_data['train'] = nonsep_discr.iloc[nonsep_tr_ind, :]
# test data
sepsis_data['test'] = sepsis_discr[
~sepsis_discr.index.isin(sepsis_tr_ind)
]
nonsep_data['test'] = nonsep_discr.iloc[
~nonsep_discr.index.isin(nonsep_tr_ind)
]
# -------------- estimate observation probability -----------------
model.name = "{}_{}_{}".format(first_stage, horizon, boot)
obs_mat = estimate_observation_pr(
observations=dcopy(model.observations),
sepsis_data=dcopy(sepsis_data['train']),
nonsep_data=dcopy(nonsep_data['train']),
interval=1
)
# update observ matrix
def observ_func(observation, state, action):
"""observation function"""
obser_matrix = obs_mat
return obser_matrix.loc[
"{}".format(state), observation
]
model.observ_func = observ_func
logging.info("Problem Loaded!\n")
# ---------------------- solving --------------------------
solve_time = time.time()
if not solve:
alpha_vectors = pickle.load(open(
'solutions/{}-{}-boot_{}.pickle'.format(
first_stage, horizon, boot
), 'rb'
))
else:
alpha_vectors = PBVI_OS(
POMDP_OS=model, epsilon=epsilon,
iterations=10, fig_dir='figures/solution'
)
pickle.dump(alpha_vectors, open(
'solutions/{}-{}-boot_{}.pickle'.format(
first_stage, horizon, boot
), 'wb'
))
logging.info("Solving Time = {}\n".format(
time.time() - solve_time
))
# -------------------- testing -------------------------
logging.info("Testing...")
prediciton_time, sepsis_cohort, nonsep_cohort = [], [], []
bm_prediciton_time = []
bm_sepsis_cohort, bm_nonsep_cohort = [], []
for test_name in ["sepsis", "nonsep"]:
if test_name == "sepsis":
test_data = sepsis_data['test']
iter_list = range(int(ratio * test_data.shape[0]))
elif test_name == "nonsep":
test_data = nonsep_data['test']
iter_list = range(test_data.shape[0])
# for each patient
for i in iter_list:
# ------------ benchmark test -----------------
bm_result = []
for t in range(len(test_data.iloc[i, ])):
if test_data.iloc[i, t] > bm_threshold:
bm_result.append(1)
else:
bm_result.append(0)
try:
bm_prediciton_time.append(np.sum([
-1 * (observ_horizon + 60),
observ_interval * bm_result.index(1)
]))
if test_name == "sepsis":
bm_sepsis_cohort.append(1)
elif test_name == "nonsep":
bm_nonsep_cohort.append(1)
except ValueError:
if test_name == "sepsis":
bm_sepsis_cohort.append(0)
elif test_name == "nonsep":
bm_nonsep_cohort.append(0)
# --------------- POMDP test ----------------
result = test_POMDP(
POMDP=model, policy=alpha_vectors,
test_data=test_data.iloc[i], status=test_name
)
try:
prediciton_time.append(np.sum([
-1 * (observ_horizon + 60),
observ_interval * result.index("sepsis")
]))
if test_name == "sepsis":
sepsis_cohort.append(1)
elif test_name == "nonsep":
nonsep_cohort.append(1)
except ValueError:
if test_name == "sepsis":
sepsis_cohort.append(0)
elif test_name == "nonsep":
nonsep_cohort.append(0)
# ----------------- benchmark statistics ----------------
tn, fp, fn, tp = confusion_matrix(
y_true=[0] * len(bm_nonsep_cohort) + [1] * len(bm_sepsis_cohort),
y_pred=bm_nonsep_cohort + bm_sepsis_cohort
).ravel()
bm_sensitivity[boot] = tp / (tp + fn)
bm_specificity[boot] = 'Inf' if tn + fp == 0 else tn / (tn + fp)
bm_precision[boot] = 'Inf' if tp + fp == 0 else tp / (tp + fp)
bm_f_1[boot] = 'Inf' if 2 * tp + fp + fn == 0 else 2*tp / (2*tp+fp+fn)
bm_ave_time[boot] = np.mean(bm_prediciton_time)
# ----------------- POMDP statistics -------------------
tn, fp, fn, tp = confusion_matrix(
y_true=[0] * len(nonsep_cohort) + [1] * len(sepsis_cohort),
y_pred=nonsep_cohort + sepsis_cohort
).ravel()
sensitivity[boot] = tp / (tp + fn)
specificity[boot] = 'Inf' if tn + fp == 0 else tn / (tn + fp)
precision[boot] = 'Inf' if tp + fp == 0 else tp / (tp + fp)
f_1[boot] = 'Inf' if 2 * tp + fp + fn == 0 else 2 * tp / (2*tp+fp+fn)
ave_time[boot] = np.mean(prediciton_time)
# ------------------ Output --------------------
bm_output(
model_name, bootstrap, bm_sensitivity, bm_specificity, bm_precision,
bm_f_1, bm_ave_time, first_stage, horizon
)
POMDP_output(
model_name, bootstrap, sensitivity, specificity, precision,
f_1, ave_time, first_stage, horizon
)
# --------------- Done ---------------
logging.info("Done!\n")
return {
'sens': list(sensitivity.values()),
'spec': list(specificity.values()),
'prec': list(precision.values()),
'f_1': list(f_1.values()),
'time': list(ave_time.values())
}
|
c6320d2638ee98931af8523085d37981095e9f14
| 29,541 |
def _endian_char(big) -> str:
"""
    Returns the character that represents either big endian or little endian in struct unpack.
Args:
big: True if big endian.
Returns:
Character representing either big or small endian.
"""
return '>' if big else '<'
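
# Usage sketch (not part of the original snippet): building a struct format.
import struct
fmt = _endian_char(big=True) + 'I'
print(struct.unpack(fmt, b'\x00\x00\x00\x2a'))  # (42,)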
|
2e1a63ec593ca6359947385019bcef45cb3749c0
| 29,542 |
def planar_angle2D(v1, v2):
"""returns the angle of one vector relative to the other in the
plane defined by the normal (default is in the XY plane)
NB This algorithm avoids carrying out a coordinate transformation
of both vectors. However, it only works if both vectors are in that
plane to start with. """
return atan2(sin2D(v1, v2), cos_sim2D(v1, v2))
|
c244ce7a2bcd27e110062dba0c88f2537e0cb7dd
| 29,543 |
def test_agg_same_method_name(es):
"""
    Pandas relies on the function name when calculating aggregations. This means if two
primitives with the same function name are applied to the same column, pandas
can't differentiate them. We have a work around to this based on the name property
that we test here.
"""
# test with normally defined functions
def custom_primitive(x):
return x.sum()
Sum = make_agg_primitive(custom_primitive, input_types=[Numeric],
return_type=Numeric, name="sum")
def custom_primitive(x):
return x.max()
Max = make_agg_primitive(custom_primitive, input_types=[Numeric],
return_type=Numeric, name="max")
f_sum = ft.Feature(es["log"]["value"], parent_entity=es["customers"], primitive=Sum)
f_max = ft.Feature(es["log"]["value"], parent_entity=es["customers"], primitive=Max)
fm = ft.calculate_feature_matrix([f_sum, f_max], entityset=es)
assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]
# test with lambdas
Sum = make_agg_primitive(lambda x: x.sum(), input_types=[Numeric],
return_type=Numeric, name="sum")
Max = make_agg_primitive(lambda x: x.max(), input_types=[Numeric],
return_type=Numeric, name="max")
f_sum = ft.Feature(es["log"]["value"], parent_entity=es["customers"], primitive=Sum)
f_max = ft.Feature(es["log"]["value"], parent_entity=es["customers"], primitive=Max)
fm = ft.calculate_feature_matrix([f_sum, f_max], entityset=es)
assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]
|
638447c081d2a5dcf4b2377943146876b7438e2c
| 29,544 |
def dispatch(request):
"""If user is admin, then show them admin dashboard; otherwise redirect
them to trainee dashboard."""
if request.user.is_admin:
return redirect(reverse("admin-dashboard"))
else:
return redirect(reverse("trainee-dashboard"))
|
046107c46cbac5e7495fee19c4354a822c476a5b
| 29,545 |
def log_pdf_factor_analysis(X, W, mu, sigma):
""" log pdf of factor analysis
Args:
X: B X D
W: D X K
mu: D X 1
sigma: D X 1
Returns:
log likelihood
"""
Pi = tf.constant(float(np.pi))
diff_vec = X - mu
sigma_2 = tf.square(sigma)
# phi = tf.eye(K) * sigma_2
# M = tf.matmul(W, W, transpose_a=True) + phi
# using Sherman-Morrison-Woodbury formula to compute the inverse
# inv_M = tf.matrix_inverse(M)
# inv_cov = tf.eye(DIM) / sigma_2 + tf.matmul(
# tf.matmul(W, inv_M), W, transpose_b=True) / sigma_2
# using Sylvester's determinant identity to compute log determinant
# implementation 1: directly compute determinant
# log_det = tf.log(tf.matrix_determinant(M)) + 2.0 * (DIM - K) * tf.log(sigma)
# implementation 2: using Cholesky decomposition
# log_det = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(M)))) + 2.0 * (
# DIM - K) * tf.log(sigma)
# phi = tf.eye(DIM) * sigma_2
phi = tf.diag(sigma_2)
M = phi + tf.matmul(W, W, transpose_b=True)
inv_cov = tf.matrix_inverse(M)
log_det = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(M))))
log_likelihood = tf.matmul(
tf.matmul(diff_vec, inv_cov), diff_vec, transpose_b=True)
log_likelihood = tf.diag_part(log_likelihood)
log_likelihood += DIM * tf.log(2 * Pi)
log_likelihood += log_det
log_likelihood = tf.reduce_sum(log_likelihood) * (-0.5)
return log_likelihood
|
70eb515c3a7b7cc8ea49f6a0e79c11327629c7b5
| 29,546 |
import logging
def _assert_initial_conditions(scheduler_commands, num_compute_nodes):
"""Assert cluster is in expected state before test starts; return list of compute nodes."""
compute_nodes = scheduler_commands.get_compute_nodes()
logging.info(
"Assert initial condition, expect cluster to have {num_nodes} idle nodes".format(num_nodes=num_compute_nodes)
)
_assert_num_nodes_in_scheduler(scheduler_commands, num_compute_nodes)
_assert_compute_node_states(scheduler_commands, compute_nodes, expected_states=["idle"])
return compute_nodes
|
6a19830caf029dd2a28cdb2363988940610bbc14
| 29,547 |
from typing import Dict
from typing import Any
def _clean_parameters(parameters: Dict[str, Any]) -> Dict[str, str]:
""" Removes entries which have no value."""
return {k: str(v) for k, v in parameters.items() if v}
|
b8e911674baee7a656f2dc7ba68514c63f84290c
| 29,548 |
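For example (hypothetical parameter names, shown only to illustrate _clean_parameters): falsy values are dropped and the remaining values are stringified.

_clean_parameters({"limit": 10, "cursor": None, "q": ""})  # -> {"limit": "10"}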
def delete(run_id):
"""Submits a request to CARROT's runs delete mapping"""
return request_handler.delete("runs", run_id)
|
8f106d83ba39995f93067a3f8eb67b430b8fd301
| 29,549 |
import math
def get_tile_lat_lng(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(lat, lng) of top-left corner of tile"""
# "map-centric" latitude, in radians:
lat_rad = math.pi - 2*math.pi*y/(2**zoom)
# true latitude:
lat_rad = gudermannian(lat_rad)
lat = lat_rad * 180.0 / math.pi
# longitude maps linearly to map, so we simply scale:
lng = -180.0 + 360.0*x/(2**zoom)
return (lat, lng)
|
6bf0e31b30930f3916112d6540e4387a72238586
| 29,550 |
def process_zdr_precip(procstatus, dscfg, radar_list=None):
"""
Keeps only suitable data to evaluate the differential reflectivity in
moderate rain or precipitation (for vertical scans)
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
ml_filter : boolean. Dataset keyword
indicates if a filter on data in and above the melting layer is
applied. Default True.
rmin : float. Dataset keyword
minimum range where to look for rain [m]. Default 1000.
rmax : float. Dataset keyword
maximum range where to look for rain [m]. Default 50000.
Zmin : float. Dataset keyword
minimum reflectivity to consider the bin as precipitation [dBZ].
Default 20.
Zmax : float. Dataset keyword
maximum reflectivity to consider the bin as precipitation [dBZ]
Default 22.
RhoHVmin : float. Dataset keyword
minimum RhoHV to consider the bin as precipitation
Default 0.97
PhiDPmax : float. Dataset keyword
maximum PhiDP to consider the bin as precipitation [deg]
Default 10.
elmax : float. Dataset keyword
maximum elevation angle where to look for precipitation [deg]
Default None.
ml_thickness : float. Dataset keyword
assumed thickness of the melting layer. Default 700.
fzl : float. Dataset keyword
The default freezing level height. It will be used if no
temperature field name is specified or the temperature field is
not in the radar object. Default 2000.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'PhiDP':
phidp_field = 'differential_phase'
if datatype == 'PhiDPc':
phidp_field = 'corrected_differential_phase'
if datatype == 'RhoHV':
rhohv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv_field = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv_field = 'uncorrected_cross_correlation_ratio'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(rhohv_field not in radar.fields) or
(zdr_field not in radar.fields) or
(phidp_field not in radar.fields)):
warn('Unable to estimate ZDR in rain. Missing data')
return None, None
# if data in and above the melting layer has to be filtered determine the
# field to use
fzl = None
ml_filter = True
if 'ml_filter' in dscfg:
ml_filter = dscfg['ml_filter']
if ml_filter:
# determine which freezing level reference
temp_ref = 'temperature'
if temp_field is None and iso0_field is None:
warn('Field to obtain the freezing level was not specified. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
elif temp_field is not None:
if temp_field not in radar.fields:
warn('COSMO temperature field not available. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
elif iso0_field is not None:
if iso0_field not in radar.fields:
warn('Height over iso0 field not available. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
else:
temp_ref = 'height_over_iso0'
# determine freezing level height if necessary
if temp_ref == 'fixed_fzl':
if 'fzl' in dscfg:
fzl = dscfg['fzl']
else:
fzl = 2000.
warn('Freezing level height not defined. Using default ' +
str(fzl)+' m')
else:
temp_ref = None
# default values
rmin = 1000.
rmax = 50000.
zmin = 20.
zmax = 22.
rhohvmin = 0.97
phidpmax = 10.
elmax = None
thickness = 700.
# user defined values
if 'rmin' in dscfg:
rmin = dscfg['rmin']
if 'rmax' in dscfg:
rmax = dscfg['rmax']
if 'Zmin' in dscfg:
zmin = dscfg['Zmin']
if 'Zmax' in dscfg:
zmax = dscfg['Zmax']
if 'RhoHVmin' in dscfg:
rhohvmin = dscfg['RhoHVmin']
if 'PhiDPmax' in dscfg:
phidpmax = dscfg['PhiDPmax']
if 'elmax' in dscfg:
elmax = dscfg['elmax']
if 'ml_thickness' in dscfg:
thickness = dscfg['ml_thickness']
ind_rmin = np.where(radar.range['data'] > rmin)[0][0]
ind_rmax = np.where(radar.range['data'] < rmax)[0][-1]
zdr_precip = pyart.correct.est_zdr_precip(
radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, zmin=zmin,
zmax=zmax, rhohvmin=rhohvmin, phidpmax=phidpmax, elmax=elmax,
thickness=thickness, doc=15, fzl=fzl, zdr_field=zdr_field,
rhohv_field=rhohv_field, phidp_field=phidp_field,
temp_field=temp_field, iso0_field=iso0_field, refl_field=refl_field,
temp_ref=temp_ref)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'differential_reflectivity_in_precipitation', zdr_precip)
return new_dataset, ind_rad
|
44b58f755a103756a2cd6726d19b3a7d958d09c3
| 29,551 |
import random
def get_initators(filepath, n_lines):
"""
Open text file with iniator words and sample random iniator for each line in the poem.
"""
with open(filepath, "r", encoding = "utf-8") as file:
# save indices of all keywords
loaded_text = file.read() # load text file
lines = loaded_text.splitlines() # seperate initiator lines
initiators_list = list(random.sample(lines, n_lines)) # sample random initators
return initiators_list
|
94792679a6ea4e0bb14afd5eb38b656a2cc8af67
| 29,552 |
def GSAOI_DARK():
"""
No. Name Ver Type Cards Dimensions Format
0 PRIMARY 1 PrimaryHDU 289 ()
1 1 ImageHDU 144 (2048, 2048) float32
2 2 ImageHDU 144 (2048, 2048) float32
3 3 ImageHDU 144 (2048, 2048) float32
4 4 ImageHDU 144 (2048, 2048) float32
"""
return download_from_archive("S20150609S0023.fits")
|
c1cea8420ef518027d14bcf4d430c772268c6024
| 29,553 |
import traceback
import time
def wrapLoop(loopfunc):
"""Wraps a thread in a wrapper function to restart it if it exits."""
def wrapped():
while True:
try:
loopfunc()
except BaseException:
print(f"Exception in thread {loopfunc},"
" restarting in 10s...")
traceback.print_exc()
else:
print(f"Thread {loopfunc} exited, restarting in 10s...")
time.sleep(10)
return wrapped
|
86c48bc850bb1cf17121130ee9349dd529acf5e3
| 29,554 |
def get_version(tp):
"""
Get Version based on input parameters
`tp` - Object of class: Transport
"""
response = None
try:
response = tp.send_data('proto-ver', '---')
except RuntimeError as e:
on_except(e)
response = ''
return response
|
276dae2599ec99906ea954aae8ad9f79eb2de7d7
| 29,555 |
def _decode_feed_ids(option_feeds):
"""
>>> _decode_feed_ids('123,456')
[123, 456]
"""
return [int(x) for x in option_feeds.strip().split(',')]
|
9218a170c445b3b8d83f08c39d1547c3ff6e2d20
| 29,556 |
def append_to_phase(phase, data, amt=0.05):
"""
Add additional data outside of phase 0-1.
"""
indexes_before = [i for i, p in enumerate(phase) if p > 1 - amt]
indexes_after = [i for i, p in enumerate(phase) if p < amt]
phase_before = [phase[i] - 1 for i in indexes_before]
data_before = [data[i] for i in indexes_before]
phase_after = [phase[i] + 1 for i in indexes_after]
data_after = [data[i] for i in indexes_after]
return (
np.concatenate((phase_before, phase, phase_after)),
np.concatenate((data_before, data, data_after)),
)
|
1b416e5352efdff9e578e77f8a068a8f6a446a38
| 29,557 |
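A small sketch of the wrap-around behaviour of append_to_phase (values chosen for illustration; assumes numpy is imported as np).

phase = np.array([0.02, 0.5, 0.98])
data = np.array([1.0, 2.0, 3.0])
p, d = append_to_phase(phase, data, amt=0.05)
# p -> [-0.02, 0.02, 0.5, 0.98, 1.02]
# d -> [3.0, 1.0, 2.0, 3.0, 1.0]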
def timedelta2s(t_diff):
"""return number of seconds from :class:`numpy.timedelta64` object
Args:
t_diff: time difference as :class:`numpy.timedelta64` object
Returns:
scalar corresponding to number of seconds
"""
return t_diff / np.timedelta64(1, 's')
|
47d3b41717c877aa9c57a0f2745b95888738523b
| 29,558 |
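For instance (illustrative timestamps, assuming numpy is imported as np):

t_diff = np.datetime64('2020-01-01T00:01') - np.datetime64('2020-01-01T00:00')
timedelta2s(t_diff)  # -> 60.0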
import pandas as pd
def window(MT_seq, WT_seq, window_size=5):
"""
Chop two sequences with a sliding window
"""
if len(MT_seq) != len(WT_seq):
raise Exception("len(MT_seq) != len(WT_seq)")
pos = []
mt = []
wt = []
    for i in range(len(MT_seq) - window_size + 1):
pos.append(i)
mt.append(MT_seq[i:i+window_size])
wt.append(WT_seq[i:i+window_size])
dt = pd.DataFrame({"position": pos,
"MT": mt,
"WT": wt})
return dt
|
67fecea9ed7155a2c85e9cd7acae9ff5a17402e7
| 29,559 |
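A quick illustration of window with made-up 5-nucleotide sequences and a window of 3:

dt = window("AAGGT", "AAGCT", window_size=3)
# dt contains position [0, 1, 2], MT ['AAG', 'AGG', 'GGT'] and WT ['AAG', 'AGC', 'GCT']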
def splits_for_blast(target, NAME):
"""Create slices for BLAST
    This function creates multiple slices of 400 nucleotides from a given fasta
    sequence with a step size of 50. Gaps are excluded from the sequence, which is
    why slices with fewer than 400 nucleotides are discarded.
Args:
target (np.array): Fasta sequence in an array.
NAME (str): Global variable. Internal index of SNAPPy for this fasta.
Returns:
List of fasta files slices. Each is a proper fasta.
"""
target_seq = target[1:]
no_gap_t = target_seq[target_seq != '-']
target_length = no_gap_t.shape[0]
sub_aligns =[ [[f'>{NAME}_{x}'] , no_gap_t[x:x+400]] for x in range(0, target_length, 50) if len(no_gap_t[x:x+400]) == 400]
return sub_aligns
|
6ad193fe494a6387fbb06d2c2a3b6a059b903a5f
| 29,560 |
from io import StringIO
def test_load_items_errors() -> None:
"""
Test error cases when creating a list of classification Items from a dataframe
"""
def load(csv_string: StringIO) -> str:
df = pd.read_csv(csv_string, sep=",", dtype=str)
numerical_columns = ["scalar2", "scalar1"]
non_image_channels = _get_non_image_dict(["label", "image2"], ["scalar2", "scalar1"])
with pytest.raises(Exception) as ex:
DataSourceReader(data_frame=df,
# Provide values in a different order from the file!
image_channels=["image2", "image1"],
image_file_column="path",
label_channels=["label"],
label_value_column="value",
# Provide values in a different order from the file!
non_image_feature_channels=non_image_channels,
numerical_columns=numerical_columns).load_data_sources()
return str(ex)
csv_string = StringIO("""subject,channel,path,value,scalar1
S1,image1,foo1.nii,,2.1
""")
assert "columns are missing: scalar2" in load(csv_string)
csv_string = StringIO("""subject,channel,path,scalar1,scalar2
S1,image1,foo1.nii,2.1,2.2
""")
assert "columns are missing: value" in load(csv_string)
csv_string = StringIO("""id,channel,path,value,scalar1,scalar2
S1,image,foo.nii
S1,label,,True,1.1,1.2
""")
assert "columns are missing: subject" in load(csv_string)
|
649358c42db33e178a4269ed48b186999903bbdb
| 29,561 |
import imghdr
def validate_image(stream):
"""
Ensure the images are valid and in correct format
Args:
stream (Byte-stream): The image
Returns:
str: return image format
"""
header = stream.read(512)
stream.seek(0)
    fmt = imghdr.what(None, header)
    if not fmt:
        return None
    return '.' + (fmt if fmt != 'jpeg' else 'jpg')
|
1a1976f5b009c2400071ebf572d886c1f7d12ab0
| 29,562 |
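A minimal check of validate_image (illustrative bytes, not a complete image; note that the imghdr module is deprecated in recent Python versions): a stream starting with the PNG signature is reported as '.png'.

import io

stream = io.BytesIO(b'\x89PNG\r\n\x1a\n' + b'\x00' * 16)
validate_image(stream)  # -> '.png'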
def my_mean(my_list):
"""Calculates the mean of a given list.
Keyword arguments:
my_list (list) -- Given list.
return (float) -- Mean of given list.
"""
return my_sum(my_list)/len(my_list)
|
2423d51bf457a85ee8a6a8f1505a729b6d1d3f6f
| 29,563 |
def pr(labels, predictions):
"""Compute precision-recall curve and its AUC.
Arguments:
labels {array} -- numpy array of labels {0, 1}
predictions {array} -- numpy array of predictions, [0, 1]
Returns:
        tuple -- precision array, recall array, area float, threshold array
"""
precision, recall, threshold = precision_recall_curve(labels, predictions)
area = auc(recall, precision)
return precision, recall, area, threshold
|
5cf18052875396483f7a76e4c6c0b55f1541803d
| 29,564 |
def grouped_evaluate(population: list, problem, max_individuals_per_chunk: int = None) -> list:
"""Evaluate the population by sending groups of multiple individuals to
a fitness function so they can be evaluated simultaneously.
This is useful, for example, as a way to evaluate individuals in parallel
on a GPU."""
if max_individuals_per_chunk is None:
max_individuals_per_chunk = len(population)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
fitnesses = []
for chunk in chunks(population, max_individuals_per_chunk):
phenomes = [ ind.decode() for ind in chunk ]
fit = problem.evaluate_multiple(phenomes)
fitnesses.extend(fit)
for fit, ind in zip(fitnesses, population):
ind.fitness = fit
return population
|
ea43be334def0698272ba7930cc46dc84ce78de9
| 29,565 |
def ZeusPaypalAccounts(request):
""" Zeus Paypal Account Credentials """
if request.method == "GET":
return render(request, "lost-empire/site_templates/zeus/paypal_accounts.html")
|
34a0fc616beac2869d501d1652ccd7c9d8ff2489
| 29,566 |
def upper_tri_to_full(n):
"""Returns a coefficient matrix to create a symmetric matrix.
Parameters
----------
n : int
The width/height of the matrix.
Returns
-------
SciPy CSC matrix
The coefficient matrix.
"""
entries = n*(n+1)//2
val_arr = []
row_arr = []
col_arr = []
count = 0
for i in range(n):
for j in range(i, n):
# Index in the original matrix.
col_arr.append(count)
# Index in the filled matrix.
row_arr.append(j*n + i)
val_arr.append(1.0)
if i != j:
# Index in the original matrix.
col_arr.append(count)
# Index in the filled matrix.
row_arr.append(i*n + j)
val_arr.append(1.0)
count += 1
return sp.coo_matrix((val_arr, (row_arr, col_arr)),
(n*n, entries)).tocsc()
|
5fdca1868f0824d9539bd785aa99b20c6195b7c0
| 29,567 |
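A small sanity check of upper_tri_to_full (illustrative): for n = 2 the 3 upper-triangular entries are mapped to all 4 positions of the symmetric matrix.

C = upper_tri_to_full(2)
C.toarray()
# array([[1., 0., 0.],
#        [0., 1., 0.],
#        [0., 1., 0.],
#        [0., 0., 1.]])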
def sort_dnfs(x, y):
"""Sort dnf riders by code and riderno."""
if x[2] == y[2]: # same code
if x[2]:
return cmp(strops.bibstr_key(x[1]),
strops.bibstr_key(y[1]))
else:
return 0 # don't alter order on unplaced riders
else:
return strops.cmp_dnf(x[2], y[2])
|
ccf20fb43df26219ce934e18b2d036e3cf6d13b7
| 29,570 |
import torch
def gen_diag(dim):
"""generate sparse diagonal matrix"""
diag = torch.randn(dim)
a_sp = sparse.diags(diag.numpy(), format=args.format)
a_pt = _torch_from_scipy(a_sp)
return a_pt, a_sp
|
9ca2842553a1b6331347210bd1da049f53e67361
| 29,571 |
def one_of(patterns, eql=equal):
"""Return a predicate which checks an object matches one of the patterns.
"""
def oop(ob):
for p in patterns:
if validate_object(ob, p, eql=eql):
return True
return False
return oop
|
058f1f64780760d9e996858dcfad4c0a47c07448
| 29,572 |
import networkx as nx
def convert_to_MultiDiGraph(G):
"""
takes any graph object, loads it into a MultiDiGraph type Networkx object
:param G: a graph object
"""
a = nx.MultiDiGraph()
node_bunch = []
for u, data in G.nodes(data = True):
node_bunch.append((u,data))
a.add_nodes_from(node_bunch)
edge_bunch = []
for u, v, data in G.edges(data = True):
if 'Wkt' in data.keys():
data['Wkt'] = str(data['Wkt'])
edge_bunch.append((u,v,data))
a.add_edges_from(edge_bunch)
return a
|
bde49710bed50386bd7bb09816e6f18089ed8030
| 29,574 |
def x_to_world_transformation(transform):
"""
Get the transformation matrix from x(it can be vehicle or sensor)
coordinates to world coordinate.
Parameters
----------
transform : carla.Transform
The transform that contains location and rotation
Returns
-------
matrix : np.ndarray
        The transformation matrix.
"""
rotation = transform.rotation
location = transform.location
# used for rotation matrix
c_y = np.cos(np.radians(rotation.yaw))
s_y = np.sin(np.radians(rotation.yaw))
c_r = np.cos(np.radians(rotation.roll))
s_r = np.sin(np.radians(rotation.roll))
c_p = np.cos(np.radians(rotation.pitch))
s_p = np.sin(np.radians(rotation.pitch))
matrix = np.identity(4)
# translation matrix
matrix[0, 3] = location.x
matrix[1, 3] = location.y
matrix[2, 3] = location.z
# rotation matrix
matrix[0, 0] = c_p * c_y
matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
matrix[1, 0] = s_y * c_p
matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
matrix[2, 0] = s_p
matrix[2, 1] = -c_p * s_r
matrix[2, 2] = c_p * c_r
return matrix
|
718227deea6a6be4a0b24ebf4eda40d78be20fcf
| 29,575 |
def protobuf_get_constant_type(proto_type) :
"""About protobuf write types see :
https://developers.google.com/protocol-buffers/docs/encoding#structure
+--------------------------------------+
+ Type + Meaning + Used For +
+--------------------------------------+
+ + + int32, int64, uint32+
+ 0 + Varint + uint64,sint32,sint64+
+ + + boolean, enum +
+--------------------------------------+
+ + + +
+ 1 + 64-bit + fixed64, sfixed64, +
+ + + double +
+--------------------------------------+
+ 2 + string + string +
+--------------------------------------+
+ 5 + 32-bit + float +
+--------------------------------------+
"""
if 'uInt32' == proto_type or \
'sInt32' == proto_type or \
'int32' == proto_type :
return 0
elif 'double' == proto_type :
return 1
elif 'string' == proto_type :
return 2
elif 'float' == proto_type :
return 5
return 2
|
46ce7e44f8499e6c2bdcf70a2bc5e84cb8786956
| 29,576 |
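For example, the mapping follows the wire-type table in the docstring:

protobuf_get_constant_type('int32')   # -> 0 (varint)
protobuf_get_constant_type('double')  # -> 1 (64-bit)
protobuf_get_constant_type('float')   # -> 5 (32-bit)
protobuf_get_constant_type('string')  # -> 2 (length-delimited)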
def edit_delivery_products(request, delivery):
"""Edit a delivery (name, state, products). Network staff only."""
delivery = get_delivery(delivery)
if request.user not in delivery.network.staff.all():
return HttpResponseForbidden('Réservé aux administrateurs du réseau '+delivery.network.name)
if request.method == 'POST': # Handle submitted data
_parse_form(request)
JournalEntry.log(request.user, "Edited products for delivery %s/%s", delivery.network.name, delivery.name)
return redirect('edit_delivery', delivery.id)
else: # Create and populate forms to render
vars = { 'user': request.user,
'delivery': delivery}
vars.update(csrf(request))
return render_to_response('edit_delivery_products.html', vars)
|
fc734e5ded0a17d20a79d36e8ae599ee763ea73a
| 29,577 |
import pprint
def format_locals(sys_exc_info):
"""Format locals for the frame where exception was raised."""
current_tb = sys_exc_info[-1]
while current_tb:
next_tb = current_tb.tb_next
if not next_tb:
frame_locals = current_tb.tb_frame.f_locals
return pprint.pformat(frame_locals)
current_tb = next_tb
|
b5a21f42c8543d9de060ff7be2b3ad6b23065de9
| 29,579 |
def binarize_garcia(label: str) -> str:
"""
Streamline Garcia labels with the other datasets.
:returns (str): streamlined labels.
"""
if label == 'hate':
return 'abuse'
else:
return 'not-abuse'
|
5cc26303e0c496d46b285e266604a38a0c88e8d7
| 29,580 |
def diagstack(K1,K2):
"""
combine two kernel matrices along the diagonal [[K1 0][0 K2]]. Use to have
two kernels in temporal sequence
Inputs
-------
K1, K2 : numpy arrays
        kernel matrices
Returns
--------
matrix of kernel values
"""
r1,c1 = K1.shape
r2,c2 = K2.shape
Kt = np.hstack((K1,np.zeros([r1,c2])))
Kb = np.hstack((np.zeros([r2,c1]),K2))
return np.vstack((Kt,Kb))
|
6e163bf62ca2639e5bacebad6c03700b1056de2e
| 29,581 |
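A short example of diagstack (illustrative matrices, assuming numpy is imported as np):

K1 = np.array([[1.0]])
K2 = np.array([[2.0, 0.0], [0.0, 3.0]])
diagstack(K1, K2)
# array([[1., 0., 0.],
#        [0., 2., 0.],
#        [0., 0., 3.]])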
import string
def submit_new_inteface():
"""POST interface configuration from form data"""
global unassigned_ints, interface_nums
ip = None
mask = None
status = None
descr = None
vrf = None
negotiation = None
int_num = [i for i in request.form.get("interface") if i not in string.ascii_letters]
int_type = [i for i in request.form.get("interface") if i in string.ascii_letters]
interface = BuildInterface.Templates(''.join(int_type), ''.join(int_num))
if request.form.get('ip') and request.form.get('mask'):
ip = request.form.get('ip')
mask = request.form.get('mask')
if request.form.get('status'):
status = request.form.get('status')
if request.form.get('description'):
descr = request.form.get('description')
if request.form.get('vrf'):
vrf = request.form.get('vrf')
if request.form.get('negotiation'):
negotiation = request.form.get('negotiation')
config = interface.build_interface(ip, mask, status, descr, vrf, negotiation)
status = SendConfig.send_configuration(netconf_session, config)
if status == 'Success':
show_interfaces = GetInterfacesInfo.get_single_interfaces(netconf_session, request.form.get("interface"))
return jsonify({'data': render_template('new_interface_table.html', interfaces=show_interfaces)})
else:
return jsonify({'data': render_template('config_failed.html', status=status)})
|
f746071d1f1ce2c1bdd9c0b4d4401edbb1119c36
| 29,582 |
from typing import Optional
def component_clause(): # type: ignore
"""
component_clause =
type_prefix type_specifier array_subscripts? component_list
"""
return (
syntax.type_prefix,
syntax.type_specifier,
Optional(syntax.array_subscripts),
syntax.component_list,
)
|
cd788687645d028c39f7ad439aab1ee21e5ad495
| 29,583 |
import numpy as np
def clean_time_series(time, val, nPoi):
"""
Clean doubled time values and checks with wanted number of nPoi
:param time: Time.
:param val: Variable values.
:param nPoi: Number of result points.
"""
# Create shift array
Shift = np.array([0.0], dtype='f')
# Shift time to right and left and subtract
time_sr = np.concatenate((Shift, time))
time_sl = np.concatenate((time, Shift))
time_d = time_sl - time_sr
time_dn = time_d[0:-1]
# Get new values for time and val
tol = 1E-5
timen = time[time_dn > tol]
valn = val[time_dn > tol]
if len(timen) != nPoi:
raise ValueError(
"Error: In clean_time_series, length and number of results \
points do not match.")
return timen, valn
|
35a4cea11a0dbf33916f3df6f8aae5c508a0c838
| 29,584 |
from typing import Callable
def deep_se_print(func: Callable) -> Callable:
"""Transforms the function to print nested side effects.
Searches recursively for changes on deep inner attributes of the arguments.
Goes down a tree until it finds some element which has no __dict__. For
each element of the tree, if it has no __dict__ or if __eq__ is
user-defined, it verifies equality (==/__eq__) with the previous state.
"""
def g(*args):
previous_states = deepcopy(args)
arg_names = [str(previous_state) for previous_state in previous_states]
gen = (f"{previous_state}"
for previous_state in previous_states)
print(f"Call of {func.__qualname__} on args "
f"{tuple(gen)}:")
result = func(*args)
search_recursive(args, previous_states, arg_names)
return result
def search_recursive(args, previous_states, arg_names,
tree: str = ""):
for arg, previous_state, arg_name in zip(args,
previous_states, arg_names):
if (not hasattr(previous_state, "__dict__")
or _has_eq_defined(previous_state)):
if arg != previous_state:
print(f" {str(arg_name)}{tree} changed"
f" from {str(previous_state)}"
f" to {str(arg)}.")
else:
tree = f" of {arg}" + tree
for key, attr in vars(arg).items():
if not hasattr(attr, "__dict__"):
if attr != getattr(previous_state, key):
print(f" {str(key)}{tree}"
f" changed"
f" from {str(getattr(previous_state, key))}"
f" to {str(attr)}.")
continue
search_recursive(tuple(vars(attr).values()),
tuple(
vars(getattr(previous_state,
key)).values()
),
vars(attr).keys(),
f" of {str(attr)}" + tree)
return g
|
e5c2ee57f9f5ecd992ac36a30ac6e32c7afdbd8a
| 29,585 |
from pathlib import Path
def prepare_checkpoints(path_to_checkpoints:str, link_keys=["link1","link2","link3","link4"], real_data=True,*args, **kwargs)-> str:
""" The main function preparing checkpoints for pre-trained SinGANs of Polyp images.
Parameters
-----------
path_to_checkpoints: str
A directory path to download checkpoints.
link_keys: list
A list of link keys: link1, link2, link3, link4. One or multiple link keys can be put in this list.
real_data: bool
If True, the real images and masks used to train SinGANs will be downloaded to the checkpoint directory.
Return
------
checkpoint_paths_list, real_image_mask_pair_list
A sorted list of paths to downloaded checkpoints.
A sorted (image_path, mask_path) tuple list.
"""
all_links = load_configs()["links"]
real_data_links = load_configs()["real_data_links"]
#alls_in_one_dir = os.path.join(path_to_checkpoints, "all")
#os.makedirs(alls_in_one_dir, exist_ok=True)
checkpoint_paths = []
for link_key in link_keys:
print(all_links[link_key])
download_link = all_links[link_key]
directory = download_and_extract_single_file(download_link, path_to_checkpoints)
#print("Directory=", directory)
checkpoint_paths = checkpoint_paths + list(Path(directory).iterdir())
## moving checkpoints to root directory
#for sub_dir in tqdm(Path(directory).iterdir()):
#print(sub_dir)
# shutil.move(str(sub_dir), alls_in_one_dir)
checkpoint_paths_str = [str(p) for p in checkpoint_paths]
    # Download and prepare real images and masks
real_data_paths = None
if real_data:
image_directory = download_and_extract_single_file(real_data_links["images_link"], path_to_checkpoints)
mask_directory = download_and_extract_single_file(real_data_links["masks_link"], path_to_checkpoints)
image_paths = list(Path(image_directory).iterdir())
mask_paths = list(Path(mask_directory).iterdir())
image_paths = [str(p) for p in image_paths]
mask_paths = [str(p) for p in mask_paths]
image_paths = natsorted(image_paths)
mask_paths = natsorted(mask_paths)
real_data_paths = list(zip(image_paths, mask_paths))
return natsorted(checkpoint_paths_str), real_data_paths
|
4e49a495b3dd587c4b9b350d5e329f7dab36ef30
| 29,586 |
def data_count():
"""
:return: 数据集大小
"""
return 300
|
1582c3782cd77ee79727a7874afbb74539f3ff9e
| 29,587 |
import logging
import re
def grid_name_lookup(engine):
"""Constructs a lookup table of Institute names to ids by combining names with
aliases and cleaned names containing country names in brackets. Multinationals are
detected.
Args:
engine (:obj:`sqlalchemy.engine.base.Engine`): connection to the database
Returns:
(:obj:`list` of :obj:`dict`): lookup table [{name: [id1, id2, id3]}]
Where ids are different country entities for multinational institutes.
Most entities just have a single [id1]
"""
institute_name_id_lookup = defaultdict(set)
with db_session(engine) as session:
for institute in session.query(Institute).all():
name = institute.name.lower()
institute_name_id_lookup[name].add(institute.id)
logging.info(f"{len(institute_name_id_lookup)} institutes in GRID")
for alias in session.query(Alias).all():
name = alias.alias.lower()
institute_name_id_lookup[name].add(alias.grid_id)
logging.info(f"{len(institute_name_id_lookup)} institutes after adding aliases")
# look for institute names containing brackets: IBM (United Kingdom)
n_countries = 0
for bracketed in (session
.query(Institute)
.filter(Institute.name.contains('(') & Institute.name.contains(')'))
.all()):
found = re.match(r'(.*) \((.*)\)', bracketed.name)
if found:
                # strip the country name from the institute name, e.g. "IBM (United Kingdom)" -> "ibm", giving {"ibm": {grid_id1, grid_id2}}
name = found.groups()[0].lower()
institute_name_id_lookup[name].add(bracketed.id)
n_countries += 1
logging.info(f"{n_countries} institutes with country name in the title")
# Convert to dict --> list
institute_name_id_lookup = {k: list(v) for k, v in institute_name_id_lookup.items()}
logging.info(f"{len(institute_name_id_lookup)} total institute names in lookup")
return institute_name_id_lookup
|
0a0fef49c722d6c8c40e2d00f1d87d8f41efbbef
| 29,588 |
def sample_vMF(theta, kappa,size=1):
"""
Sampling from vMF
This is based on the implementation I found online here:
http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
(**** NOTE THE FIX BY KEVIN *****)
which is based on :
Directional Statistics (Mardia and Jupp, 1999) and on the Ulrich-Wood's algorithm for sampling.
"""
warn('Not sure about sampling vMF, use with caution!!!! ')
#print "kappa : ", kappa
#print "norm direction :" , np.linalg.norm(theta)
np.testing.assert_array_almost_equal( np.linalg.norm(theta) , 1 )
#print "kappa : ", kappa
assert kappa > 0 , "kappa must be positive !"
if np.ndim(theta)==2:
theta = np.squeeze(theta)
    assert np.ndim(theta) == 1, "theta should be one-dimensional!"
res_sampling = _rvMF(size, kappa * theta)
return np.vstack(res_sampling)
|
1e8d327b5613d9f2e5f77c26eab86d09d9d8338b
| 29,589 |