content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
---|---|---|
def _rk4(dparam=None, k0=None, y=None, kwdargs=None):
"""
a traditional RK4 scheme, with:
- y = array of all variables
- p = parameter dictionnary
dt is contained within p
"""
if 'itself' in dparam[k0]['kargs']:
dy1 = dparam[k0]['func'](itself=y, **kwdargs)
dy2 = dparam[k0]['func'](itself=y+dy1/2., **kwdargs)
dy3 = dparam[k0]['func'](itself=y+dy2/2., **kwdargs)
dy4 = dparam[k0]['func'](itself=y+dy3, **kwdargs)
    else:
        # The rate does not depend on the variable itself, so all four RK4
        # stages evaluate to the same value.
        dy1 = dparam[k0]['func'](**kwdargs)
dy2 = dparam[k0]['func'](**kwdargs)
dy3 = dparam[k0]['func'](**kwdargs)
dy4 = dparam[k0]['func'](**kwdargs)
return (dy1 + 2*dy2 + 2*dy3 + dy4) * dparam['dt']['value']/6.
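# A minimal, self-contained RK4 step is sketched below for comparison; the
# names f, y and dt are illustrative and not part of the function above.
def rk4_step(f, y, dt):
    """Classic fourth-order Runge-Kutta update for dy/dt = f(y)."""
    k1 = f(y)
    k2 = f(y + dt * k1 / 2.0)
    k3 = f(y + dt * k2 / 2.0)
    k4 = f(y + dt * k3)
    return (k1 + 2 * k2 + 2 * k3 + k4) * dt / 6.0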
|
a44e177e6925c36fa9355ed9c5ee41d0604d01bd
| 28,782 |
def generate_discord_markdown_string(lines):
"""
Wraps a list of message into a discord markdown block
:param [str] lines:
:return: The wrapped string
:rtype: str
"""
output = ["```markdown"] + lines + ["```"]
return "\n".join(output)
|
1c0db2f36f4d08e75e28a1c024e6d4c35638d8f5
| 28,783 |
from typing import Optional
def _sanitize_ndim(
result: ArrayLike, data, dtype: Optional[DtypeObj], index: Optional[Index]
) -> ArrayLike:
"""
Ensure we have a 1-dimensional result array.
"""
if getattr(result, "ndim", 0) == 0:
raise ValueError("result should be arraylike with ndim > 0")
elif result.ndim == 1:
# the result that we want
result = _maybe_repeat(result, index)
elif result.ndim > 1:
if isinstance(data, np.ndarray):
raise ValueError("Data must be 1-dimensional")
if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
# i.e. PandasDtype("O")
result = com.asarray_tuplesafe(data, dtype=object)
cls = dtype.construct_array_type()
result = cls._from_sequence(result, dtype=dtype)
else:
result = com.asarray_tuplesafe(data, dtype=dtype)
return result
|
6a1e49e07658ea3f7b9e80915c73464548715419
| 28,784 |
from typing import Callable
from typing import Any
from typing import TypeVar
T = TypeVar("T")
from typing import List
from typing import Dict
def from_list_dict(f: Callable[[Any], T], x: Any) -> List[Dict[str, T]]:
"""Parses list of dictionaries, applying `f` to the dictionary values.
All items must be dictionaries.
"""
assert isinstance(x, list)
assert all(isinstance(d, dict) for d in x)
return [ { k: f(v) for (k, v) in d.items() } for d in x]
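# Hedged usage sketch with made-up values: coerce every dictionary value to int.
parsed = from_list_dict(int, [{"a": "1"}, {"b": "2"}])
assert parsed == [{"a": 1}, {"b": 2}]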
|
2a1316098165367e8657d22717245a6c695cb96e
| 28,785 |
def TSTR_eICU(identifier, epoch):
"""
"""
# get "train" data
exp_data = np.load('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy').item()
X_synth = exp_data['synth_data']
Y_synth = exp_data['synth_labels']
n_synth = X_synth.shape[0]
X_synth = X_synth.reshape(n_synth, -1)
# get test data
data = np.load('./data/eICU_task_data.npy').item()
X_test = data['X_test']
Y_test = data['Y_test']
# iterate over labels
results = []
for label in range(Y_synth.shape[1]):
print('task:', data['Y_columns'][label])
print('(', np.mean(Y_synth[:, label]), 'positive in train, ', np.mean(Y_test[:, label]), 'in test)')
#model = RandomForestClassifier(n_estimators=50).fit(X_synth, Y_synth[:, label])
model = SVC(gamma=0.001).fit(X_synth, Y_synth[:, label])
predict = model.predict(X_test)
print('(predicted', np.mean(predict), 'positive labels)')
accuracy = sklearn.metrics.accuracy_score(Y_test[:, label], predict)
precision = sklearn.metrics.precision_score(Y_test[:, label], predict)
recall = sklearn.metrics.recall_score(Y_test[:, label], predict)
print('\tacc:', accuracy, '\tprec:', precision, '\trecall:', recall)
results.append([accuracy, precision, recall])
# do the OR task
extreme_heartrate_test = Y_test[:, 1] + Y_test[:, 4]
extreme_respiration_test = Y_test[:, 2] + Y_test[:, 5]
extreme_systemicmean_test = Y_test[:, 3] + Y_test[:, 6]
Y_OR_test = np.vstack([extreme_heartrate_test, extreme_respiration_test, extreme_systemicmean_test]).T
Y_OR_test = (Y_OR_test > 0)*1
extreme_heartrate_synth = Y_synth[:, 1] + Y_synth[:, 4]
extreme_respiration_synth = Y_synth[:, 2] + Y_synth[:, 5]
extreme_systemicmean_synth = Y_synth[:, 3] + Y_synth[:, 6]
Y_OR_synth = np.vstack([extreme_heartrate_synth, extreme_respiration_synth, extreme_systemicmean_synth]).T
Y_OR_synth = (Y_OR_synth > 0)*1
OR_names = ['extreme heartrate', 'extreme respiration', 'extreme MAP']
OR_results = []
for label in range(Y_OR_synth.shape[1]):
print('task:', OR_names[label])
print('(', np.mean(Y_OR_synth[:, label]), 'positive in train, ', np.mean(Y_OR_test[:, label]), 'in test)')
model = RandomForestClassifier(n_estimators=50).fit(X_synth, Y_OR_synth[:, label])
predict = model.predict(X_test)
print('(predicted', np.mean(predict), 'positive labels)')
accuracy = sklearn.metrics.accuracy_score(Y_OR_test[:, label], predict)
precision = sklearn.metrics.precision_score(Y_OR_test[:, label], predict)
recall = sklearn.metrics.recall_score(Y_OR_test[:, label], predict)
print(accuracy, precision, recall)
OR_results.append([accuracy, precision, recall])
return results, OR_results
|
8f719e94689b1354e6463935e6dbdc2c5a110779
| 28,786 |
def wizard_active(step, current):
"""
Return the proper classname for the step div in the badge wizard.
The current step needs a 'selected' class while the following step needs a
'next-selected' class to color the tip of the arrow properly.
"""
if current == step:
return 'selected'
elif (current + 1) == step:
return 'next-selected'
|
2daad3f7651df7609f3473af698e116ce419c9df
| 28,787 |
def set_token(token: OAuth2Token):
"""Set dynamics client token in a thread, so it can be done in an async context."""
def task():
name = "dynamics-client-token"
expires = int(token["expires_in"]) - 60
cache.set(name, token, expires)
with ThreadPoolExecutor() as executor:
future = executor.submit(task)
return future.result()
|
61b4bfa3dbe1ddd03ff608a476f34905ec2440e9
| 28,788 |
def pFind_clumps(f_list, n_smooth=32, param=None, arg_string=None, verbose=True):
"""
A parallel implementation of find_clumps. Since SKID is not parallelized
this can be used to run find_clumps on a set of snapshots from one
simulation.
**ARGUMENTS**
f_list : list
A list containing the filenames of snapshots OR the tipsy snapshots
n_smooth : int (optional)
Number of nearest neighbors used for particle smoothing in the
simulation. This is used in the definition of a density threshold
for clump finding.
param : str (optional)
filename for a tipsy .param file
arg_string : str (optional)
Additional arguments to be passed to SKID. Cannot use -tau, -d, -m, -s, -o
verbose : bool
Verbosity flag. Default is True
**RETURNS**
clumpnum_list : list
A list containing the particle clump assignment for every snapshot in
f_list. clumps[i][j] gives the clump number for particle j in
snapshot i.
"""
# Number of processes to create = number of cores
n_proc = cpu_count()
# Set up the arguments for calls to find_clumps
arg_list = []
for i, f_name in enumerate(f_list):
arg_list.append([f_name, n_smooth, param, arg_string, i, verbose])
    print(arg_list)
# Set up the pool
pool = Pool(n_proc)
# Run the job in parallel
results = pool.map(_parallel_find_clumps, arg_list, chunksize=1)
pool.close()
pool.join()
return results
|
85e2c80f3fdb95f2c324b8b934550788faa6c5bb
| 28,789 |
import math
def gamma_dis(x):
"""fix gamma = 2
https://www.itl.nist.gov/div898/handbook/eda/section3/eda366b.htm
"""
x = round(x, 14)
res = round(x*math.exp(-x) / TAU_2, 14)
return res
|
a3375b7ae16755d0dab47ecd4f54ebc8c40143b9
| 28,790 |
import calendar
def get_month_number(year):
""" Function to get month from the user input. The month should be number from 1-12.
:returns: the number of month enterd by user
:rtype: int
"""
year = int(year)
while True:
val = input("Please, enter the number of month? (1-12)\n")
closed = False
try:
month = int(val)
            if (month <= 0 or month > 12):
continue
closed = True
except ValueError:
print("Invalid number!")
continue
if closed:
week_day = calendar.monthrange(year, month) # get the tuple where the first number is weekday of first day of the month and second is number of days in month
date1 = pd.Timestamp(date(year, month, 1))
date2 = pd.Timestamp(date(year, month, week_day[1]))
month_name = f'{date1.strftime("%B")}_{year}' # get the name of month
break
return month_name, date1, date2
|
c2d0f5010b8f1de6d1764a216c43eb1901c8093c
| 28,792 |
def reconstruct_with_whole_molecules(struct):
""" Build smallest molecule representation of struct.
"""
rstruct = Structure()
rstruct.set_lattice_vectors(struct.get_lattice_vectors())
molecule_struct_list = get_molecules(struct)
for molecule_struct in molecule_struct_list:
geo_array = molecule_struct.get_geo_array()
ele = molecule_struct.geometry['element']
for i,coord in enumerate(geo_array):
rstruct.append(coord[0],coord[1],coord[2],ele[i])
return rstruct
|
f3595fdd23e22fc0c24b9a7cfa6e000206eda93f
| 28,793 |
def _json_serialize_no_param(cls):
""" class decorator to support json serialization
    Register class as a known type so it can be serialized and deserialized
properly
"""
return _patch(cls, _get_type_key(cls), 0)
|
3eaf4c7c53694c316898b1a9e4d41dc4b212afed
| 28,794 |
def aireTriangle(a,b,c):
"""
Aire du triangle abc dans l'espace.
C'est la moitié de la norme du produit vectoriel ab vect ac
"""
u,v=b-a,c-a
r=u[2]*v[0]-u[0]*v[2]
s=u[0]*v[1]-u[1]*v[0]
t=u[1]*v[2]-u[2]*v[1]
return 0.5*sqrt(r*r+s*s+t*t)
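# Illustrative check (assumes numpy vertices and a sqrt import, e.g. from math):
# the right triangle (0,0,0), (1,0,0), (0,1,0) has area 1/2.
import numpy as np
a, b, c = np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
assert abs(aireTriangle(a, b, c) - 0.5) < 1e-12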
|
641aa598d36189c787b91af4a98734f2289173e0
| 28,795 |
def ez_admin(admin_client, admin_admin, skip_auth):
"""A Django test client that has been logged in as admin. When EZID endpoints are
called via the client, a cookie for an active authenticated session is included
automatically. This also sets the admin password to "admin".
Note: Because EZID does not use a standard authentication procedure, it's also
necessary to pull in skip_auth here.
"""
admin_client.login(username='admin', password='admin')
#log.info('cookies={}'.format(admin_client.cookies))
return admin_client
|
0b2ac749a690ad5ac0dc83ca9c8f3905da5a016b
| 28,796 |
import textwrap
def _template_message(desc, descriptor_registry):
# type: (Descriptor, DescriptorRegistry) -> str
"""
    Returns the generated class definition string for this message descriptor
"""
desc = SimpleDescriptor(desc)
descriptor_registry[desc.identifier] = desc
slots = desc.field_names
# NOTE: the "pass" statement is a hack to provide a body when args is empty
initialisers = ['pass']
initialisers += [
'self.{} = self.{}()'.format(field_name, field_type)
for field_name, field_type in (desc.inner_fields)
]
initialisers += [
'self.{} = {}()'.format(field_name, field_type)
for field_name, field_type in (desc.external_fields)
]
initialisers += [
'self.{} = []'.format(field_name)
for field_name in desc.repeated_fields
]
args = ['self'] + ['{}=None'.format(f) for f in slots]
init_str = 'def __init__({argspec}):\n{initialisers}\n'.format(
argspec=', '.join(args),
initialisers=textwrap.indent('\n'.join(initialisers), ' '),
)
helpers = ""
if desc.options.map_entry:
# for map <key, value> fields
# This mirrors the _IsMessageMapField check
value_type = desc.fields_by_name['value']
if value_type.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
base_class = MessageMap
else:
base_class = ScalarMap
# Rather than (key, value), use the attributes of the correct
# MutableMapping type as the "slots"
slots = tuple(m for m in dir(base_class) if not m.startswith("_"))
helpers = 'def __getitem__(self, idx):\n pass\n'
helpers += 'def __delitem__(self, idx):\n pass\n'
body = ''.join([
_template_enum(d, descriptor_registry) for d in desc.enum_types
] + [
_template_message(d, descriptor_registry) for d in desc.nested_types
])
cls_str = (
'class {name}(object):\n'
' {docstring!r}\n'
' __slots__ = {slots}\n'
'{helpers}{body}{init}\n'
).format(
name=desc.name,
docstring="descriptor={}".format(desc.identifier),
slots=slots,
body=textwrap.indent(body, ' '),
helpers=textwrap.indent(helpers, ' '),
init=textwrap.indent(init_str, ' '),
)
return cls_str
|
2586ffe0b81ea683a40bc20700ddb970fc385962
| 28,797 |
import re
def parse_archive(path, objdump):
"""Parses a list of ObjectFiles from an objdump archive output.
Args:
path: String path to the archive.
objdump: List of strings of lines of objdump output to parse.
Returns:
List of ObjectFile objects representing the objects contained within the
archive.
"""
object_files = []
current_file = None
for line in objdump:
if not line:
continue
match = re.match(r'^(.*[^\)])(\((.+)\))?:\s+file format', line)
if match:
filename = match.group(3) if match.group(3) else match.group(1)
current_file = ObjectFile(filename, path)
object_files.append(current_file)
continue
if not current_file:
raise Exception('Archive does not specify object to attribute '
'symbols to ' + path)
sym = parse_symbol(line)
if not sym:
if current_file.symbols:
current_file = None
continue
current_file.symbols.append(sym)
return object_files
|
1f30804ba1d723bf8656dd26f522c5a369db4b3d
| 28,798 |
async def async_validate_trigger_config(
hass: HomeAssistant, config: ConfigType
) -> ConfigType:
"""Validate config."""
config = TRIGGER_SCHEMA(config)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(config[CONF_DEVICE_ID])
trigger = config[CONF_TYPE]
if (
not device
or device.model not in DEVICES
or trigger not in DEVICES[device.model]
):
raise InvalidDeviceAutomationConfig(f"Unsupported model {device.model}")
return config
|
f43e1b58bd37e0cf989da8076505cf34c4386830
| 28,799 |
def get_vgg_dilate_conv(data):
"""
vgg-16
shared convolutional layers,use dilated convolution in group 5
:param data: Symbol
:return: Symbol
"""
# ====group 1
conv1_1 = mx.symbol.Convolution(data=data, kernel=(3,3), pad=(1,1), \
num_filter=64, name='conv1_1')
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type='relu', \
name='relu1_1')
conv1_2 = mx.symbol.Convolution(data=relu1_1,kernel=(3,3), pad=(1,1), \
num_filter=64, name='conv1_2')
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type='relu', \
name='relu1_2')
pool1 = mx.symbol.Pooling(data=relu1_2, pool_type="max", kernel=(2,2), \
stride=(2,2), name="pool1")
# ======group 2
conv2_1 = mx.symbol.Convolution(data=pool1, kernel=(3,3), pad=(1,1), \
num_filter=128, name='conv2_1')
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", \
name="relu2_1")
conv2_2 = mx.symbol.Convolution(data=relu2_1, kernel=(3,3), pad=(1,1), \
num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", \
name="relu2_2")
pool2 = mx.symbol.Pooling(data=relu2_2, pool_type="max",kernel=(2,2), \
stride=(2,2), name="pool2")
# ======group 3
conv3_1 = mx.symbol.Convolution(data=pool2, kernel=(3,3), pad=(1,1), \
num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type='relu', \
name='relu3_1')
conv3_2 = mx.symbol.Convolution(data=relu3_1, kernel=(3,3), pad=(1,1), \
num_filter=256, name='conv3_2')
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type='relu', \
name='relu3_2')
conv3_3 = mx.symbol.Convolution(data=relu3_2, kernel=(3,3), pad=(1,1), \
num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type='relu', \
name='relu3_3')
pool3 = mx.symbol.Pooling(data=relu3_3, pool_type='max', kernel=(2,2), \
stride=(2,2), name="pool3")
# ======group 4
conv4_1 = mx.symbol.Convolution(data=pool3, kernel=(3,3), pad=(1, 1), \
num_filter=512, name='conv4_1')
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type='relu', \
name='relu4_1')
conv4_2 = mx.symbol.Convolution(data=relu4_1,kernel=(3,3), pad=(1,1), \
num_filter=512, name='conv4_2')
    relu4_2 = mx.symbol.Activation(data=conv4_2, act_type='relu',\
name='relu4_2')
conv4_3 = mx.symbol.Convolution(data=relu4_2,kernel=(3,3), pad=(1,1), \
num_filter=512, name='conv4_3')
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type='relu', \
name='relu4_3')
pool4 = mx.symbol.Pooling(data=relu4_3,pool_type='max', kernel=(2,2), \
stride=(2,2), name='pool4')
# ======group5
# ======use dilation conv
conv5_1 = mx.symbol.Convolution(data=pool4, kernel=(3,3), pad=(2,2), \
dilate=(2,2), num_filter=512, \
name='conv5_1')
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type='relu', \
name='relu5_1')
conv5_2 = mx.symbol.Convolution(data=relu5_1, kernel=(3,3), pad=(2,2), \
dilate=(2,2), num_filter=512, \
name='conv5_2')
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type='relu', \
name='relu5_2')
conv5_3 = mx.symbol.Convolution(data=relu5_2, kernel=(3,3), pad=(2,2), \
dilate=(2,2), num_filter=512, \
name='conv5_3')
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type='relu', \
name='relu5_3')
return relu5_3
|
12a25c10e10f2648f21c9e75ae0e2ef034541be1
| 28,801 |
from typing import Dict
import ast
from itertools import zip_longest
def _get_func_aliases(tree) -> Dict:
"""
Get __func_alias__ dict for mapping function names
"""
fun_aliases = {}
assignments = [node for node in tree.body if isinstance(node, ast.Assign)]
for assign in assignments:
try:
if assign.targets[0].id == "__func_alias__":
for key, value in zip_longest(assign.value.keys, assign.value.values):
fun_aliases.update({key.s: value.s})
except AttributeError:
pass
return fun_aliases
|
13967f2e1ea5e31db8cc700915d9b8af9cbc609d
| 28,802 |
def admin_only(view_func):
"""Restrict page access to only admins"""
@wraps(view_func)
def wrapper_function(request, *args, **kwargs):
group = None
if request.user.groups.exists():
group = request.user.groups.all()[0].name
if group == 'profile':
return redirect('earlydating-yourprofile')
if group == 'admin':
return view_func(request, *args, **kwargs)
return wrapper_function
|
6facd5bed6f48541bd9db678606a805a75983232
| 28,803 |
def _im_distance(adj1, adj2, hwhm):
"""Computes the Ipsen-Mikhailov distance for two symmetric adjacency
matrices
Note : Requires networks with the same number of nodes. The networks
can be directed and weighted (with weights in the range [0,1]).
Params
------
adj1, adj2 (array): adjacency matrices.
hwhm (float) : hwhm of the lorentzian distribution.
Returns
-------
dist (float) : Ipsen-Mikhailov distance.
"""
N = len(adj1)
#get laplacian matrix
L1 = laplacian(adj1, normed=False)
L2 = laplacian(adj2, normed=False)
#get the modes for the positive-semidefinite laplacian
w1 = np.sqrt(np.abs(eigh(L1)[0][1:]))
w2 = np.sqrt(np.abs(eigh(L2)[0][1:]))
#we calculate the norm for both spectrum
norm1 = (N-1)*np.pi/2 - np.sum(np.arctan(-w1/hwhm))
norm2 = (N-1)*np.pi/2 - np.sum(np.arctan(-w2/hwhm))
#define both spectral densities
density1 = lambda w: np.sum(hwhm/((w - w1)**2 + hwhm**2))/norm1
density2 = lambda w: np.sum(hwhm/((w - w2)**2 + hwhm**2))/norm2
func = lambda w: (density1(w) - density2(w))**2
return np.sqrt(quad(func, 0, np.inf)[0])
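# Hedged sanity check (assumes laplacian, eigh and quad are imported from
# scipy as in the function above): identical graphs are at distance zero.
import numpy as np
path3 = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
assert abs(_im_distance(path3, path3, hwhm=0.08)) < 1e-8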
|
4e12000194d570235e88fbe36513cca221b43e49
| 28,804 |
def get_movement_endtime(dT, T):
"""
Returns the end time of the movement assuming that the start time is zero.
"""
_t = 0.
for i, _dT in enumerate(T):
_t = np.max([_t, np.sum(dT[:i]) + T[i]])
return _t
|
aca310be41dfbe7292f8f66f9c50159365570045
| 28,806 |
def getGroupUpcomingEvents(request, group):
"""
Return all the upcoming events that are assigned to the specified group.
:param request: Django request object
:param group: for this group page
:rtype: list of the namedtuple ThisEvent (title, page, url)
"""
# Get events that are a child of a group page, or a postponement or extra
# info a child of the recurring event child of the group
rrEvents = RecurringEventPage.events(request).exclude(group_page=group) \
.upcoming().child_of(group).this()
qrys = [SimpleEventPage.events(request).exclude(group_page=group)
.upcoming().child_of(group).this(),
MultidayEventPage.events(request).exclude(group_page=group)
.upcoming().child_of(group).this(),
rrEvents]
for rrEvent in rrEvents:
qrys += [PostponementPage.events(request).child_of(rrEvent.page)
.upcoming().this(),
ExtraInfoPage.events(request).exclude(extra_title="")
.child_of(rrEvent.page).upcoming().this()]
# Get events that are linked to a group page, or a postponement or extra
# info a child of the recurring event linked to a group
rrEvents = group.recurringeventpage_set(manager='events').auth(request) \
.upcoming().this()
qrys += [group.simpleeventpage_set(manager='events').auth(request)
.upcoming().this(),
group.multidayeventpage_set(manager='events').auth(request)
.upcoming().this(),
rrEvents]
for rrEvent in rrEvents:
qrys += [PostponementPage.events(request).child_of(rrEvent.page)
.upcoming().this(),
ExtraInfoPage.events(request).exclude(extra_title="")
.child_of(rrEvent.page).upcoming().this()]
events = sorted(chain.from_iterable(qrys),
key=attrgetter('page._upcoming_datetime_from'))
return events
|
ae61166a96635e597452181c30dc7d2945f1e4af
| 28,807 |
def get_marginal_probabilities(probs_table):
"""
Get the marginal probability of each event given a
contingency table.
"""
ind = []
for transp in get_transpositions(probs_table):
marginal_probs = [probs_table.transpose(transp)[1, ...].sum(),
probs_table.transpose(transp)[0, ...].sum()]
ind.append(marginal_probs)
return np.array(ind)
|
e222a11669b0c9920d875960a1ccea9e9e8a971c
| 28,808 |
def get_graph_map():
"""Get the graph rail network mapbox map.
Returns:
go.Figure: Scattermapbox of rail network graph
"""
# Get the nodes and edges and pandas dataframes from the database
nodes, edges = get_berths()
if nodes is None or edges is None:
return None
# Plot the edges as lines between the nodes
graph_map = go.Figure(
go.Scattermapbox(
mode="lines",
lat=edges["LATITUDE"].tolist(),
lon=edges["LONGITUDE"].tolist(),
line=dict(width=1.0, color="#888"),
hoverinfo="none",
)
)
# Plot the nodes with markers depending on if a train is present
graph_map.add_trace(
go.Scattermapbox(
mode="markers",
lat=nodes["LATITUDE"].tolist(),
lon=nodes["LONGITUDE"].tolist(),
marker=go.scattermapbox.Marker(
size=nodes["SIZE"].tolist(), color=nodes["COLOUR"].tolist(), opacity=0.7
),
hovertext=nodes["TEXT"].tolist(),
hoverinfo="text",
)
)
# Update the mapbox layout
graph_map.update_layout(
autosize=True,
height=1000,
hovermode="closest",
showlegend=False,
mapbox=dict(
accesstoken=app.server.config["DASH_MAPBOX_TOKEN"],
style="light",
pitch=0,
zoom=9,
center=go.layout.mapbox.Center(lat=53.3, lon=-2.5), # 53.4771, -2.2297
),
)
graph_map["layout"]["uirevision"] = "constant"
return graph_map
|
d6ca3697aa74d23dd79ea93d3211c644cc852c72
| 28,809 |
def getFormatter(name):
"""Return the named formatter function. See the function
"setFormatter" for details.
"""
if name in ( 'self', 'instance', 'this' ):
return af_self
elif name == 'class':
return af_class
elif name in ( 'named', 'param', 'parameter' ):
return af_named
elif name in ( 'default', 'optional' ):
return af_default
elif name in ( 'anonymous', 'arbitrary', 'unnamed' ):
return af_anonymous
elif name in ( 'keyword', 'pair', 'pairs' ):
return af_keyword
else:
raise ValueError('unknown trace formatter %r' % name)
|
216e232d0fa599fb1916cbe833ed3da59d9b03cd
| 28,811 |
def _mat_vec(v: PyTree, oks: PyTree) -> PyTree:
"""
compute S v = 1/n ⟨ΔO† ΔO⟩v = 1/n ∑ₗ ⟨ΔOₖᴴ ΔOₗ⟩ vₗ
"""
res = tree_conj(vjp(oks, jvp(oks, v).conjugate()))
return tree_cast(res, v)
|
f6f1242f4f4f55df196f75f51887ce1c9c8af067
| 28,812 |
def get_recordio_iterator(path_to_rec, batch_size, data_shape=(3, 227, 227)):
"""
Creates mxnet recordio iterator for recordio files. For more details see https://beta.mxnet.io/api/gluon-related/_autogen/mxnet.image.ImageIter.html
"""
data_iter = mx.io.ImageRecordIter(
path_imgrec=path_to_rec,
        data_shape=data_shape, # output data shape. A 227x227 region will be cropped from the original image.
batch_size=batch_size, # number of samples per batch
)
return data_iter
|
dacda63fc01859f249f7dc897a86706129359d44
| 28,813 |
from typing import Tuple
import ctypes
def raxisa(matrix: ndarray) -> Tuple[ndarray, float]:
"""
Compute the axis of the rotation given by an input matrix
and the angle of the rotation about that axis.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/raxisa_c.html
:param matrix: Rotation matrix.
:return: Axis of the rotation, Angle through which the rotation is performed
"""
matrix = stypes.to_double_matrix(matrix)
axis = stypes.empty_double_vector(3)
angle = ctypes.c_double()
libspice.raxisa_c(matrix, axis, ctypes.byref(angle))
return stypes.c_vector_to_python(axis), angle.value
|
79f08a16c38ba261d5faedc0565dea7199d145a9
| 28,814 |
import errno
from six.moves import builtins
def call_errno_test():
""" Test procedure.
"""
ObjectBrowser.create_browser(errno, 'errno')
ObjectBrowser.create_browser(builtins, 'builtins')
exit_code = ObjectBrowser.execute()
return exit_code
|
f006bb438d502ccac4b7b70e18d83eb2e8b2b258
| 28,815 |
def use_item(entity, player, new_fov_radius, max_fov_radius, sound):
""" Item settings when used """
player.fighter.hp += entity.item.healing
if player.fighter.hp > player.fighter.max_hp:
player.fighter.hp = player.fighter.max_hp
sound.play()
new_fov_radius = max_fov_radius
use_message = Message(f"{entity.name} utilisee, Selen regagne de la serenite !!!", libtcod.darkest_grey)
entity.char = "."
entity.name = "Restes de " + entity.name
entity.item = None
return use_message, new_fov_radius
|
15ae450fa3d2dc6f2e390c9b861783304ba92f30
| 28,816 |
import ast
def parse_recommendation(line):
"""
Parses a recommendation record. Format: userId\t avg_rating\t rmse\t labels
Parameters
----------
line : str
The line that contains user information
Returns
-------
list : list
A list containing userID, avg_rating, rmse, labels
"""
fields = line.strip().split("\t")[:]
return [str(fields[0]), str(fields[1]),
str(fields[2]), str(fields[3]), ast.literal_eval(fields[4])]
|
b49feef1f7010f6cfa35585a86f1b12b314d95c1
| 28,817 |
import math
def two_props_diff_conf_interval(values1: np.ndarray, values2: np.ndarray,
conf_level: float) -> tuple:
"""Calculates the confidence interval for the diff between two proportions
Args:
values1 (np.array): sample 1 binary(0/1) values
values2 (np.array): sample 2 binary(0/1) values
conf_level (float): confidence level
Returns:
tuple: lower and upper values of confidence interval
"""
p1 = np.mean(values1)
p2 = np.mean(values2)
p_diff = p1-p2
n1 = len(values1)
n2 = len(values2)
SE = math.sqrt(((p1*(1-p1))/n1) + ((p2*(1-p2))/n2))
z_critical = utils.get_z_critical(conf_level)
lower = p_diff - z_critical*SE
upper = p_diff + z_critical*SE
return (lower, upper)
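# Hedged usage sketch with made-up samples (relies on utils.get_z_critical as
# in the function above): 60/100 vs 50/100 successes at 95% confidence.
import numpy as np
g1 = np.array([1] * 60 + [0] * 40)
g2 = np.array([1] * 50 + [0] * 50)
lower, upper = two_props_diff_conf_interval(g1, g2, conf_level=0.95)
# Expect roughly 0.10 +/- 1.96 * sqrt(0.6*0.4/100 + 0.5*0.5/100), i.e. about (-0.04, 0.24).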
|
c87ec7dfe72e13820b197e8b270e49f9ffacdb28
| 28,818 |
from typing import Sequence
def generate_random_text(n_tokens: int, tokens: Sequence[str], probabilities: Sequence[float]) -> str:
"""Return random text with tokens chosen based on token probabilities.
Parameters
----------
n_tokens: int
the length of the text in number of tokens
tokens: Sequence[str]
the list of tokens as sequence, list ordering must match `probabilities` parameter
probabilities: Sequence[float]
the occurance probabilities for each token, list ordering must match `tokens` parameter
Returns
-------
str
The random text that was generated
"""
token_list = np.random.choice(
tokens,
size=n_tokens,
p=probabilities,
replace=True
)
return " ".join(token_list)
|
0d799be072d963f905f87681086820e5653600bf
| 28,819 |
def pad(array_like: ArrayLike, begin: Shape, end: Shape, fill_type: BorderType) -> ShapeletsArray:
"""
Pads an array
Parameters
----------
array_like: ArrayLike
Input array
begin: Shape
        Full 4 dimensional tuple specifying how many elements to add at the beginning of the
input array. Negative values are not permitted; 0 implies no changes on a particular
dimension.
end: Shape
Full 4 dimensional tuple, representing the padding at the end of the array. Negative
values are not permitted; 0 implies no changes on a particular
dimension.
fill_type: BorderType {'clampedge', 'periodic', 'symmetric', 'zero'}
Determines the values for the new padded elements.
Returns
-------
ShapeletsArray
Padded version of the original array.
Examples
--------
>>> import shapelets.compute as sc
>>> a = sc.array([[1,2],[3,4]])
No pad at the begining but one extra row at the end, initialized with 'zeros':
>>> sc.pad(a, (0,0,0,0), (1,0,0,0), 'zero')
[3 2 1 1]
1 2
3 4
0 0
Adding two extra rows and two extra column at both ends, using 'symmetric':
>>> sc.pad(a, (1,1,0,0), (1,1,0,0), 'symmetric')
[4 4 1 1]
1 1 2 2
1 1 2 2
3 3 4 4
3 3 4 4
Same as before, but using 'periodic':
>>> sc.pad(a, (1,1,0,0), (1,1,0,0), 'periodic')
[4 4 1 1]
4 3 4 3
2 1 2 1
4 3 4 3
2 1 2 1
Finally, using 'clampedge':
>>> sc.pad(a, (1,1,0,0), (1,1,0,0), 'clampedge')
[4 4 1 1]
1 1 2 2
1 1 2 2
3 3 4 4
3 3 4 4
"""
return _pygauss.pad(array_like, begin, end, __pygauss_fill_type(fill_type))
|
6e858e2a9f250a9a80d9664aa3a10c0643e88d78
| 28,820 |
from typing import Iterable
import itertools
def verify(y_f: GFE, v: Iterable[bytes], c: FiniteFieldPolynomial) -> GFE:
"""Check whether a alleged secret share belongs to a group of shares.
The group of shares is specified by the public `v` and `c` values returned
by `split`. If the share belongs to the group, this returns the x value
corresponding to the share. If the share does not belong to the group,
raises ValueError.
"""
# This is the hash-based verification scheme described in
# https://doi.org/10.1016/j.ins.2014.03.025
ry_f = _hash_list(v, len(y_f)) * y_f
result: GFE
for x_i, v_i in zip(map(y_f.coerce, itertools.count(1)), v):
y_g = c(x_i) - ry_f
if v_i == _hash_pair(y_f, y_g):
try:
result
except NameError:
result = x_i
else:
raise ValueError("Duplicate y value")
try:
return result
except NameError:
raise ValueError("Invalid share")
|
cf682be844245415c85243a0d32fe8ab0387804c
| 28,822 |
def compute_bottom_border(b, dims, mat=False):
"""Compute the bottom border for a given border size."""
x, y = dims
return b + _A(b, x, mat)
|
6853b9cfddc3d727aa5572f5adf7c61c3cfe71f2
| 28,823 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Load the saved entities."""
# Print startup message
_LOGGER.info(
"Version %s is starting, if you have any issues please report" " them here: %s",
VERSION,
ISSUE_URL,
)
hass.data.setdefault(DOMAIN, {})
if entry.unique_id is not None:
hass.config_entries.async_update_entry(entry, unique_id=None)
ent_reg = async_get(hass)
for entity in async_entries_for_config_entry(ent_reg, entry.entry_id):
ent_reg.async_update_entity(entity.entity_id, new_unique_id=entry.entry_id)
# Setup the data coordinator
coordinator = AlertsDataUpdateCoordinator(
hass,
entry.data,
entry.data.get(CONF_TIMEOUT),
entry.data.get(CONF_INTERVAL),
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR: coordinator,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
|
2e744f2c67f9f31fd0e3a58d3c5c645e2c7a65d3
| 28,824 |
def name():
""" """
response = PhoneInterface()
text = request.form
if text:
result = to_noun(text['SpeechResult'])
response.speak('Is your name {}?'.format(result))
new_user = User.query.get(response.get_user_id())
new_user.name = result
db.save()
response.listen('listenbot.confirm_name', 'boolean')
else:
response.speak("I don't understand. Can you say that again?")
response.listen('listenbot.name')
return str(response)
|
62638b78806de07067439d337ac6e70f0069db5a
| 28,825 |
def bonferonni_posthoc(*args, ttest_type='equal'):
"""Computes T-Tests between groups, should adjust your
significance value, a.k.a. bonferonni correction to
minimize family wide error rates
Args:
group1: Input group 1 as a numpy array
group2: Input group 2 as a numpy array
groupN: Input group n as a numpy array
ttest_type: [('equal') | 'unequal' | 'repeated'] string
of which type of ttest to perform
Returns:
P_values: p_value associated with mean difference
Delta Means: difference between the means
Lower CI : Lower 95th Confidence Interval
Upper CI : Upper 95th Confidence Interval
"""
k_groups = len(args)
ttest = {'equal': equal_variance_ttest,
'unequal': unequal_variance_ttest,
'repeated': repeated_ttest}[ttest_type.lower()]
p_values = np.full((k_groups, k_groups), 0.0)
delta_x = p_values.copy()
lower_ci = p_values.copy()
upper_ci = p_values.copy()
for ndx2 in range(k_groups):
for ndx1 in range(ndx2+1, k_groups):
result = ttest(args[ndx1], args[ndx2])
deltaX = result['Mean1'] - result['Mean2']
delta_x[ndx1, ndx2] = deltaX
p_values[ndx1, ndx2] = result['P_value']
lower_ci[ndx1, ndx2] = result['95th CI'][0]
upper_ci[ndx1, ndx2] = result['95th CI'][1]
return {'P_value': p_values,
'Delta Means': delta_x,
'Lower CI': lower_ci,
'Upper CI': upper_ci}
|
9ee5537106d6cd5b6074ff8839ee631a47dc5f5b
| 28,827 |
from typing import Dict
def list_subject(_type: str, limit: int = 10, offset: int = 0) -> Dict:
"""
    Fetch the list of subjects
"""
url_path = "/api/v1/web/subjects"
params = {"type": _type, "limit": limit, "offset": offset}
return _call_iam_api(http_get, url_path, data=params)
|
26085a5676263ba425e3306332a380df849e7069
| 28,828 |
def get_all_envs(envs_file):
"""Get list of all intercepted environment variables."""
return list(iter_envs(envs_file))
|
425cf386b200fe2aa0545c48539c1497d3bdb253
| 28,830 |
import six
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value
|
62f31f9230d33d0120cca3fb187746871f54a8bb
| 28,831 |
def highlight_seam(img: np.ndarray, seam: np.ndarray) -> np.array:
"""
Function to highlight the seam
Args:
img (np.array): Image array
seam (np.array): Seam array with length equals height of the image array
The x-coordinates of the pixel to remove from each row
Returns:
Image array with seam highlighted
"""
if len(seam) != img.shape[0]:
err_msg = "Seam height {0} does not match image height {1}"
        raise ValueError(err_msg.format(len(seam), img.shape[0]))
highlight = img.copy()
height, width, s = img.shape
for i in range(height):
j = [x for x in seam if x[1] == i][0][0]
if s == 3 :
highlight[i][j-1] = np.array([255, 0, 0])
elif s == 4 :
highlight[i][j-1] = np.array([255, 0, 0, 0])
return highlight
|
81cc5dd7d28b4240e43cdef6e9bcde364cbf2b01
| 28,832 |
def _maybe_encode_unicode_string(record):
"""Encodes unicode strings if needed."""
if isinstance(record, str):
record = bytes(record, "utf-8").strip()
return record
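# Illustrative behaviour: str input is UTF-8 encoded and stripped, anything
# else passes through unchanged.
assert _maybe_encode_unicode_string("  abc ") == b"abc"
assert _maybe_encode_unicode_string(b"xyz") == b"xyz"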
|
2621056ba77fd314b966e3e0db08887da53e3803
| 28,833 |
import torch
def filter(x):
"""
applies modified bilateral filter to AB channels of x, guided by L channel
x -- B x 3 x H x W pytorch tensor containing an image in LAB colorspace
"""
h = x.size(2)
w = x.size(3)
    # Separate out the luminance channel; don't use AB channels to measure similarity
xl = x[:,:1,:,:]
xab = x[:,1:,:,:]
xl_pad = get_pad(xl)
xl_w = {}
for i in range(3):
for j in range(3):
xl_w[str(i) + str(j)] = xl_pad[:, :, i:(i+h), j:(j+w)]
# Iteratively apply in 3x3 window rather than use spatial kernel
max_iters = 5
cur = torch.zeros_like(xab)
# comparison function for pixel intensity
def comp(x, y):
d = torch.abs(x - y) * 5.
return torch.pow(torch.exp(-1. * d),2)
    # apply bilateral filtering to AB channels, guided by the L channel
cur = xab.clone()
for it in range(max_iters):
cur_pad = get_pad(cur)
xl_v = {}
for i in range(3):
for j in range(3):
xl_v[str(i) + str(j)] = cur_pad[:, :, i:(i+h), j:(j+w)]
denom = torch.zeros_like(xl)
cur = cur * 0.
for i in range(3):
for j in range(3):
scl = comp(xl, xl_w[str(i) + str(j)])
cur = cur + xl_v[str(i) + str(j)] * scl
denom = denom + scl
cur = cur / denom
# store result and return
x[:, 1:, :, :] = cur
return x
|
57b650e39d7c552dcdde0fddf39388ed7946716f
| 28,834 |
def series_not_found():
"""
Formats error message for event with missing series.
:return: error message
:rtype: str
"""
error = "event(s) where their series could not be found"
return error
|
10d0915d7e47fd308de8c072cdf9c81f078d2eee
| 28,835 |
def compiler(language, config, permit_undefined_jinja=False):
"""Support configuration of compilers. This is somewhat platform specific.
Native compilers never list their host - it is always implied. Generally, they are
metapackages, pointing at a package that does specify the host. These in turn may be
metapackages, pointing at a package where the host is the same as the target (both being the
native architecture).
"""
compiler = native_compiler(language, config)
version = None
if config.variant:
target_platform = config.variant.get('target_platform', config.subdir)
language_compiler_key = '{}_compiler'.format(language)
# fall back to native if language-compiler is not explicitly set in variant
compiler = config.variant.get(language_compiler_key, compiler)
version = config.variant.get(language_compiler_key + '_version')
else:
target_platform = config.subdir
# support cross compilers. A cross-compiler package will have a name such as
# gcc_target
# gcc_linux-cos6-64
compiler = '_'.join([compiler, target_platform])
if version:
compiler = ' '.join((compiler, version))
compiler = ensure_valid_spec(compiler, warn=False)
return compiler
|
131811e7642e9756e2506de9ba89e5ca304a9498
| 28,836 |
from typing import Iterable
import itertools
def iter_contour_segments(points: Contour) -> Iterable[Segment]:
"""Given points A, B, ...N returns (A, B), (B, ...), (..., N), (N, A)."""
# "contour" frequently has shape (N, 1, 2); remove "1" middle layer.
if len(points.shape) > 2:
size = points.size
points = points.view()
points.shape = (size // 2, 2)
return zip(
points,
itertools.chain(points[1:], points[:1]))
|
8f5911ee02f96be085b09c2143b989e6120e98cc
| 28,839 |
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
value = int(value)
if value < 1000000:
return value
if value < 1000000000:
new_value = value / 1000000.0
return ungettext('%(value).1f million', '%(value).1f million', new_value) % {'value': new_value}
if value < 1000000000000:
new_value = value / 1000000000.0
return ungettext('%(value).1f billion', '%(value).1f billion', new_value) % {'value': new_value}
if value < 1000000000000000:
new_value = value / 1000000000000.0
return ungettext('%(value).1f trillion', '%(value).1f trillion', new_value) % {'value': new_value}
return value
|
d9f3c776e3bc354cb68434080ca74aa4f52ad1aa
| 28,840 |
def __validate_archive_file_arg(required_arg_map):
"""
Verify that the archive file exists.
:param required_arg_map: the required arguments map
:return: the archive file name
:raises CLAException: if the archive file is not valid
"""
_method_name = '__validate_archive_file_arg'
archive_file_name = required_arg_map[CommandLineArgUtil.ARCHIVE_FILE_SWITCH]
try:
FileUtils.validateExistingFile(archive_file_name)
except IllegalArgumentException, iae:
ex = exception_helper.create_cla_exception('WLSDPLY-20014', _program_name, archive_file_name,
iae.getLocalizedMessage(), error=iae)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
return archive_file_name
|
dfc56614d832dee3e4534ac9e0634770e0c13cae
| 28,841 |
import inspect
def suite(description=None, name=None, rank=None):
"""
Decorator, mark a class as a suite class.
:param description: suite's description (by default, the suite's description is built from the name)
:param name: suite's name (by default, the suite's name is taken from the class's name)
:param rank: this value is used to order suites of the same hierarchy level
"""
def wrapper(klass):
assert inspect.isclass(klass), "%s is not a class (suite decorator can only be used on a class)" % klass
md = get_metadata(klass)
assert not md.dependencies, "'depends_on' can not be used on a suite class"
md.is_suite = True
md.rank = rank if rank is not None else _get_metadata_next_rank()
md.name = name or klass.__name__
md.description = description or build_description_from_name(md.name)
return klass
return wrapper
|
6d7568196d92c64f6451797c854e90cc80ae9a9f
| 28,842 |
def mercator_to_gcj02(mer_matrix):
"""
Mercator coordinates to National Bureau of Survey and Measurement coordinates
:param mer_matrix:
:return:
"""
return wgs84_to_gcj02(
mercator_to_wgs84(mer_matrix)
)
|
9e131301dc4c203f6bd1698ffd44c7e3b5e52aad
| 28,844 |
def delete_menu_item(current_user, item_id):
"""Delete menu item by id"""
if current_user['admin']:
resp = menu_inst.del_menu(item_id)
if resp:
return jsonify({"Message": "Item deleted"}), 200
return jsonify({"Message": "Item not found"}), 404
return jsonify({"Message": "Not authorized to delete menu"}), 401
|
f8ba25ec925489942e6d71f4854bb7d9f7bd427a
| 28,845 |
from typing import Tuple
from typing import Dict
def generate_parameters(system: params.CollisionSystem) -> Tuple[np.ndarray, np.ndarray, int, Dict[int, params.SelectedRange]]:
""" Generate the analysis parameters.
This can be called multiple times if necessary to retrieve the parameters easily in any function.
Args:
system: Collision system.
Returns:
(pt_values, eta_values, n_cent_bins, centrality_ranges): Pt values where the efficiency should be evaluated,
eta values where the efficiency should be evaluated, number of centrality bins, map from centrality bin
number to centrality bin ranges.
"""
pt_values = np.linspace(0.15, 9.95, 100 - 1)
eta_values = np.linspace(-0.85, 0.85, 35)
n_cent_bins = 4 if system != params.CollisionSystem.pp else 1
centrality_ranges = {
0: params.SelectedRange(0, 10),
1: params.SelectedRange(10, 30),
2: params.SelectedRange(30, 50),
3: params.SelectedRange(50, 90),
}
return pt_values, eta_values, n_cent_bins, centrality_ranges
|
8f743bfcbc998b8f2bfd05d10bb0a6acb0777d04
| 28,846 |
def merge_extras(extras1, extras2):
"""Merge two iterables of extra into a single sorted tuple. Case-sensitive"""
if not extras1:
return extras2
if not extras2:
return extras1
return tuple(sorted(set(extras1) | set(extras2)))
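# Illustrative usage with made-up extras: the union is returned as a sorted tuple.
assert merge_extras(("security", "tests"), ("docs", "tests")) == ("docs", "security", "tests")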
|
0383e0e99c53844f952d919eaf3cb478b4dcd6d1
| 28,847 |
def netdev_get_driver_name(netdev):
"""Returns the name of the driver for network device 'netdev'"""
symlink = '%s/sys/class/net/%s/device/driver' % (root_prefix(), netdev)
try:
target = os.readlink(symlink)
except OSError, e:
log("%s: could not read netdev's driver name (%s)" % (netdev, e))
return None
slash = target.rfind('/')
if slash < 0:
log("target %s of symbolic link %s does not contain slash"
% (target, symlink))
return None
return target[slash + 1:]
|
fa55fb2e95357534f7630fdae4bf5304bef27f09
| 28,848 |
def _get_content(item, base_url=None):
"""
Return a dictionary of content, for documents, objects and errors.
"""
return {
_unescape_key(key): _primitive_to_document(value, base_url)
for key, value in item.items()
if key not in ("_type", "_meta")
}
|
91c9e2ea9a74b1a44f6aa83154815941f9d3460d
| 28,849 |
def _cluster_by_adjacency(sel_samples):
"""Function for clustering selected samples based on temporal adjacency.
Input arguments:
sel_samples - A vector of booleans indicating which samples have been
selected.
Output arguments:
clusters - A vector of cluster numbers indicating to which cluster
each sample belongs to. The cluster number zero corresponds
to samples that were not selected.
"""
clusters = np.zeros(len(sel_samples), dtype='int')
cluster_number = 1 # Next cluster number.
for i, s in enumerate(sel_samples):
if (s == True):
clusters[i] = cluster_number
else:
# Update the cluster number at temporal discontinuities.
if (i > 0 and sel_samples[i-1] == True):
cluster_number += 1
return clusters
|
d2ed71112e0bf4d8bfeaf6069ef46c7e8b7f4b4a
| 28,850 |
import requests
def send_mail(email, email_string):
"""
handles email sending procedures
:param email
:param email_string
:return:
"""
key = MAILGUN_KEY
recipient = email
request_url = MAILGUN_URL
data = {
'from': MAILGUN_TESTMAIL_ADDR,
'to': recipient,
'subject': 'Daily Report',
'text': email_string
}
request = requests.post(request_url, auth=('api', key), data=data)
return request.text
|
f326585a971c26eda9bf3646298f2523fa166555
| 28,851 |
import torch
def _relu_3_ramp(x):
""" Relu(x) ** 3 ramp function
returns
f(x) = relu(x) ** 3
df/dx(x) = relu(x) ** 2
"""
rx = torch.relu(x)
ramp = rx.pow(3)
grad = rx.pow(2) * 3.0
return ramp, grad
|
56dfc37ef81209590e020f0c67f8204a6d8d338a
| 28,852 |
def get_ham_ising_tube(dtype, Ly, lam=-3.044):
"""Return the local term for the 2+1D Ising Hamiltonian on a narrow torus.
Defines the global Hamiltonian:
    $H = -\sum_{\langle i, j \rangle} X_i X_j + \mathrm{lam} \sum_i Z_i$
Represents the Hamiltonian for the 2D torus as a 1-dimensional Hamiltonian,
where each "site" is a slice of the torus in the "y" direction. The site
dimension thus depends on the size of the system in the y direction.
Args:
dtype: The data type.
Ly: The size of the torus in the y direction (number of sites).
lam: The field strength.
Returns:
The Hamiltonian term, separated into a 1-site contribution and a 2-site
MPO.
"""
X = backend.np.array([[0.0, 1.0], [1.0, 0.0]])
Z = backend.np.array([[1.0, 0.0], [0.0, -1.0]])
Xcol = [
backend.np.kron(
backend.np.kron(backend.np.eye(2**i), X),
backend.np.eye(2**(Ly - i - 1)))
for i in range(Ly)
]
Zcol = [
backend.np.kron(
backend.np.kron(backend.np.eye(2**i), Z),
backend.np.eye(2**(Ly - i - 1)))
for i in range(Ly)
]
Xcol = [backend.convert_to_tensor(Xc, dtype=dtype) for Xc in Xcol]
Zcol = [backend.convert_to_tensor(Zc, dtype=dtype) for Zc in Zcol]
h1 = lam * sum(Zcol) - sum(Xcol[i] @ Xcol[(i + 1) % Ly] for i in range(Ly))
h_mpo_2site = ([-Xc for Xc in Xcol], Xcol)
return h1, h_mpo_2site
|
f0b0ae303422c52b434a05c97992f9f8793d440f
| 28,853 |
def processed_for_entities_query_clause():
"""
:return: A solr query clause you can use to filter for stories that have been tagged by any version
of our CLIFF geotagging engine (ie. tagged with people, places, and organizations)
"""
return "(tags_id_stories:({}))".format(" ".join([str(t) for t in processed_for_entities_tag_ids()]))
|
31818cd67c4a35b73504f14f11316461086cf58c
| 28,854 |
def get_shared_prefix(w1, w2):
"""Get a string which w1 and w2 both have at the beginning."""
shared = ""
    for i in range(1, min(len(w1), len(w2)) + 1):
if w1[:i] != w2[:i]:
return shared
else:
shared = w1[:i]
return shared
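# Illustrative usage: the longest common prefix of two words.
assert get_shared_prefix("interstellar", "internet") == "inter"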
|
d52850f038bc6bfe65878e3a58d7009e563af0a0
| 28,855 |
async def get_poll_ops(op_type=None, block_range=None) -> Result:
"""Returns a list of 'polls' ops within the specified block or time range."""
sql = SearchQuery.poll_ops(
op_type=op_type,
block_range=block_range
)
result = []
if sql:
res = db.db.select(sql) or []
for entry in res:
result.append(populate_by_schema(
entry, ['transaction_id', 'req_posting_auths', 'op_type', 'op_payload']
))
return Success(result)
|
4b26e411047f46aceb03fc98be1a7ba58da71a07
| 28,856 |
def list_of_divisors_v1(n):
"""Return [ list of divisors ]"""
"""
This is a slow algorithm. But it is correct.
"""
if n == 1:
return [1]
if n == 2:
return [1,2]
L = {}
if n > 0:
L[1] = True
if n > 1:
L[n] = True
for i in list_of_prime_factors(n):
L[i] = True
for j in list_of_divisors(n // i):
L[j] = True
    return list(L.keys())
|
b017d90fc8744a9607fffadaf2c653762af7c25a
| 28,857 |
from typing import Optional
import re
def get_optimal_train_size(
nb_vectors: int, index_key: str, current_memory_available: Optional[str], vec_dim: Optional[int],
) -> int:
"""
Function that determines the number of training points necessary to
train the index, based on faiss heuristics for k-means clustering.
"""
matching = re.findall(r"IVF\d+|IMI\d+x\d+", index_key)
if matching:
nb_clusters = index_key_to_nb_cluster(index_key)
points_per_cluster: int = 100
# compute best possible number of vectors to give to train the index
# given memory constraints
if current_memory_available and vec_dim:
memory_per_cluster_set = compute_memory_necessary_for_training_wrapper(
points_per_cluster, index_key, vec_dim
)
size = cast_memory_to_bytes(current_memory_available)
points_per_cluster = max(min(size / memory_per_cluster_set, points_per_cluster), 31.0)
# You will need between 30 * nb_clusters and 256 * nb_clusters to train the index
train_size = min(round(points_per_cluster * nb_clusters), nb_vectors)
else:
raise ValueError(f"Unknown index type: {index_key}")
return train_size
|
40d63534cd7e98b2a0e9ad73260fe01d4e4cdd8e
| 28,858 |
def _initialize_gui(frame, view=None):
"""Initialize GUI depending on testing mode."""
if _testing_mode(): # open without entering mainloop
return frame.edit_traits(view=view), frame
else:
frame.configure_traits(view=view)
return frame
|
3b453ccaf4341c610e7efec3494833b1ecde437e
| 28,859 |
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
13b87c518086c838f6134c186c8892596601b741
| 28,860 |
def cls_token(idx):
"""
Function helps in renaming cls_token weights
"""
token = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
return token
|
7f07ca4fe04326b4895e3fd41a3830dddc147f8a
| 28,861 |
def fast_betweenness(G, weight=None, kind = 'edge', norm=True, cutoff=None):
"""
    Gets betweenness centrality. For relatively large graphs, this function is
    faster than networkx.
Parameters
----------
G : NetworkX DiGraph or Graph
The graph to be considered.
weight: string
edge weights for shortest paths.
kind: 'edge' or 'node'
Betweenness for edges or nodes.
norm: bool
If True, returns norm betweenness (bet/((N-1)*(N-2))).
Returns
-------
dict
"""
if weight != None:
Gig = get_igraph(G, edge_weights = weight)
else:
Gig = get_igraph(G)
norm_val = len(G.nodes)*(len(G.nodes)-1)
if kind=='edge':
bet = Gig.edge_betweenness(weights=weight, cutoff=cutoff)
if norm==True:
return {e:b/norm_val for e,b in zip(G.edges,bet)}
else:
return {e:b for e,b in zip(G.edges,bet)}
elif kind=='node':
bet = Gig.betweenness(weights=weight, cutoff=cutoff)
if norm==True:
return {e:b/norm_val for e,b in zip(G.nodes,bet)}
else:
return {e:b for e,b in zip(G.nodes,bet)}
|
e06b9b1fb27f517ab2b90738f831ecc91596b3ef
| 28,863 |
def regularizer(regularization_type, regularization_rate, params):
"""
Our params all have different dimensions.
So, we loop through each w or b in our params, apply operation, sum, then use the resulting scalar
for the next, final sum in either L1 or L2 regularization
"""
if regularization_type == 'l1':
return regularization_rate * tf.reduce_sum([tf.reduce_sum(tf.abs(param)) for param in params])
if regularization_type == 'l2':
return regularization_rate * tf.reduce_sum([tf.reduce_sum(tf.square(param)) for param in params])/2.0
|
f75970411a8d9b479b990aa78083ffd9b54064cf
| 28,864 |
import collections
def get_eq_crc(devices):
"""
Builds a CRC string based on device id and device status. This function is reverse engineered
and translated to python. It is based on the CoderUtils class in the elro app
    :param devices: A dictionary of device statuses, where the id of the device is the key of the dict
"""
sorted_devices = collections.OrderedDict(sorted(devices.items()))
list_length = int(list(sorted_devices.keys())[-1])
status_crc = ""
for i in range(list_length+1):
if (i+1) in sorted_devices:
status_crc += crc_maker_char(sorted_devices[i+1])
elif i < (list_length):
status_crc += "0000"
num = ""
list_length_for_hex = hex((list_length*2+2))[2:]
if len(list_length_for_hex) < 4:
i = 4 - len(list_length_for_hex)
num = list_length_for_hex.rjust(i + len(list_length_for_hex), '0')
else:
num = list_length_for_hex
return (num + status_crc)
|
5288935625bde9e8d4bb1518c1f8fb4ff1ee79a7
| 28,865 |
import re
def get_hash_for_filename(filename, hashfile_path):
"""Return hash for filename in the hashfile."""
filehash = ''
with open(hashfile_path, 'r') as stream:
for _cnt, line in enumerate(stream):
if line.rstrip().endswith(filename):
filehash = re.match(r'^[A-Za-z0-9]*', line).group(0)
break
if filehash:
return filehash
raise AttributeError("Filename %s not found in hash file" % filename)
|
8e9e74b5995c4bfa627637e8fd9434c17684e4e9
| 28,866 |
def resonance_mass_distributions(axes):
"""
    Verification of mass distributions for several resonance species. Grey
dashed lines are the Breit-Wigner distributions with mass-dependent width,
grey solid lines are the same distributions with momentum integrated out,
and colored lines are histograms of the sampled masses.
"""
with axes() as ax:
T = .15
for ID, name in [
(213, r'$\rho(770)$'),
(2214, r'$\Delta(1232)$'),
(22212, r'$N(1535)$'),
]:
info = frzout.species_dict[ID]
m0 = info['mass']
w0 = info['width']
m_min, m_max = info['mass_range']
sign = -1 if info['boson'] else 1
def bw(m):
w = w0*np.sqrt((m - m_min)/(m0 - m_min))
return w/((m - m0)**2 + w*w/4)
def f(p, m):
return p*p / (np.exp(np.sqrt(p*p + m*m)/T) + sign)
m = np.linspace(m_min, m_max, 200)
ax.plot(m, bw(m)/integrate.quad(bw, m_min, m_max)[0],
**dashed_line)
bwf = np.array([
integrate.quad(lambda p: bw(m_)*f(p, m_), 0, 5)[0] for m_ in m
]) / integrate.dblquad(
lambda m_, p: bw(m_)*f(p, m_),
0, 5, lambda _: m_min, lambda _: m_max
)[0]
ax.plot(m, bwf, color=default_color)
hrg = frzout.HRG(T, species=[ID], res_width=True)
x = np.array([[1, 0, 0, 0]], dtype=float)
sigma = np.array([[1e6/hrg.density(), 0, 0, 0]])
v = np.zeros((1, 3))
surface = frzout.Surface(x, sigma, v)
parts = frzout.sample(surface, hrg)
m = np.sqrt(np.inner(parts['p']**2, [1, -1, -1, -1]))
ax.hist(m, bins=64, density=True, histtype='step', label=name)
ax.set_xlim(0, 2)
ax.set_xlabel('Mass [GeV]')
ax.set_ylabel('Probability')
ax.set_yticklabels([])
ax.legend(loc='upper left')
|
69e6d7166f6f4f0e6f5389835c35f9f2a10d51ff
| 28,869 |
import logging
def get_logger():
""" Get named logger """
return logging.getLogger(__name__)
|
b89fe9166f25c6eca03c0f54db2bb5863768fc6d
| 28,870 |
def word_match(reg_, str_):
"""function compares words of equal length
Invokes char_match for each character"""
if not reg_:
return True
elif not str_:
if reg_ == "$":
return True
return False
if len(reg_) == 1:
return char_match(reg_[0], str_[0])
if reg_[0] == '\\':
if not reg_[1] == str_[0]:
return False
return word_match(reg_[2:], str_[1:])
elif reg_[1] == "?":
return word_match(reg_[2:], str_) or\
word_match(reg_[0] + reg_[2:], str_)
elif reg_[1] == "*":
return word_match(reg_[2:], str_) or\
word_match(reg_, str_[1:])
elif reg_[1] == "+":
return word_match(reg_[0] + reg_[2:], str_) or\
word_match(reg_, str_[1:])
if not char_match(reg_[0], str_[0]):
return False
return word_match(reg_[1:], str_[1:])
|
8064fef1d8c9dc4e6353a909b64fa1213bfe9b95
| 28,871 |
def fdp_to_model(package, table_name, resource, field_translator):
"""
Create a Babbage Model from a Fiscal DataPackage descriptor
:param package: datapackage object
:param table_name: db table name to use
:param resource: resource to load (in the datapackage object)
:param field_translator: dict for translating resource attribute names to valid db column names
:return: Babbage Model
"""
model = {
'fact_table': table_name,
'measures': {},
'dimensions': {}
}
mapping = package.descriptor['model']
schema = resource.descriptor['schema']['fields']
field_titles = dict((f.get('name'), f.get('title', f.get('name'))) for f in schema)
resource_name = resource.descriptor['name']
# Converting measures
all_concepts = set()
for orig_name, measure in mapping['measures'].items():
if resource_name != measure.get('resource', resource_name):
continue
name = database_name(orig_name, all_concepts, 'measure')
all_concepts.add(name)
babbage_measure = {
'label': field_titles.get(measure['source'], measure['source']),
'column': field_translator[measure['source']]['name'],
'orig_measure': orig_name
}
if 'currency' in measure:
babbage_measure['currency'] = measure['currency']
model['measures'][name]=babbage_measure
hierarchies = {}
# Converting dimensions
for orig_name, dimension in mapping['dimensions'].items():
# Normalize the dimension name
name = database_name(orig_name, all_concepts, 'dimension')
all_concepts.add(name)
attribute_names = {}
attributes = dimension['attributes']
for orig_attr_name in attributes.keys():
attr_name = database_name(orig_attr_name, attribute_names.values(), 'attr')
attribute_names[orig_attr_name] = attr_name
primaryKeys = dimension['primaryKey']
if not isinstance(primaryKeys,list):
primaryKeys = [primaryKeys]
# Marking which attributes have labels
labels = {}
for label_name, attr in attributes.items():
if 'labelfor' in attr:
labels[attr['labelfor']] = label_name
# Flattening multi-key dimensions into separate dimensions
for pkey in primaryKeys:
# Get slugified name
translated_pkey = attribute_names[pkey]
# Get name for the dimension (depending on the number of primary keys)
if len(primaryKeys) > 1:
dimname = database_name(orig_name + '_' + translated_pkey, all_concepts, 'dimension')
else:
dimname = database_name(orig_name, all_concepts, 'dimension')
label = field_titles[attributes[pkey]['source']]
all_concepts.add(dimname)
# Create dimension and key attribute
translated_field = field_translator[attributes[pkey]['source']]
source = translated_field['name']
type = translated_field['type']
babbage_dimension = {
'attributes': {
translated_pkey:
{'column': source,
'label': field_titles[attributes[pkey]['source']],
'datatype': type,
'orig_attribute': pkey}
},
'label': label,
'key_attribute': translated_pkey,
'orig_dimension': orig_name
}
# Update hierarchies
            hierarchies.setdefault(
                name,
                {'levels': [], 'label': name.replace('_', ' ').title()},
            )['levels'].append(dimname)
# Add label attributes (if any)
if pkey in labels:
label = labels[pkey]
translated_label_field = field_translator[attributes[label]['source']]
label_source = translated_label_field['name']
label_type = translated_label_field['type']
babbage_dimension['attributes'][attribute_names[label]] = \
{
'column': label_source,
'label': field_titles[attributes[label]['source']],
'datatype': label_type,
'orig_attribute': label
}
babbage_dimension['label_attribute'] = attribute_names[label]
# Copy other attributes as well (if there's just one primary key attribute)
if len(primaryKeys) == 1:
for attr_name, attr in attributes.items():
if attr_name not in (pkey, labels.get(pkey)):
translated_attr_field = field_translator[attributes[attr_name]['source']]
attr_source = translated_attr_field['name']
attr_type = translated_attr_field['type']
babbage_dimension['attributes'][attribute_names[attr_name]] = \
{
'column': attr_source,
'label': field_titles[attributes[attr_name]['source']],
'datatype': attr_type,
'orig_attribute': attr_name
}
model['dimensions'][dimname] = babbage_dimension
    model['hierarchies'] = dict(hierarchies)
return model
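# Hedged sketch (assumption): `database_name` is used above but not defined in
# this record. A minimal version would slugify a concept name into a valid
# column identifier and de-duplicate it against the names already in use:
import re

def database_name(name, existing, prefix):
    """Slugify `name` and make it unique with respect to `existing` names."""
    slug = re.sub(r'[^a-z0-9_]+', '_', name.lower()).strip('_') or prefix
    candidate, counter = slug, 1
    while candidate in existing:
        counter += 1
        candidate = '%s_%d' % (slug, counter)
    return candidate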
|
358540a812707e42c8022dcd8e7349a8fe674dfc
| 28,872 |
import numpy as np
def PN_gen(N_bits, m=5):
    """
    Maximal length sequence (PN) signal generator.
    Generates a sequence of 0/1 bits of N_bits duration. The bits themselves
    are obtained from an m-sequence generated by m shift registers. Available
    m-sequences (PN generators) include m = 2,3,...,12, & 16.
    Parameters
    ----------
    N_bits : the number of bits to generate
    m : the number of shift registers. 2,3, .., 12, & 16
    Returns
    -------
    PN : ndarray of the generator output over N_bits
    Notes
    -----
    The sequence is periodic having period 2**m - 1 (2^m - 1).
    Examples
    --------
    >>> # A 15-bit-period signal over 50 bits
    >>> PN = PN_gen(50,4)
    """
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(N_bits/float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1,N_bits))
return PN.flatten()
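# Usage sketch (assumes the m_seq helper referenced above is available):
# a 15-bit-period m-sequence (m=4) tiled over 50 bits; the output repeats
# with period 2**4 - 1 = 15.
PN = PN_gen(50, 4)
assert PN.shape == (50,)
assert np.array_equal(PN[:15], PN[15:30])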
|
e3e51fdb6dd88483facb143f7c16b87cfb8ee15e
| 28,873 |
import numpy as np
import pandas as pd
import tablib
def df_to_dataset(df: pd.DataFrame, **kwargs):
    """
    Convert a DataFrame into a tablib.Dataset, replacing NaN values with
    empty strings.
    :param df: source DataFrame
    :param kwargs: additional constant columns to add to `df`.
        For example, if kwargs is {'some': 3}, then df gets a new column `some`
        with the value 3 in every row.
    :return: tablib.Dataset built from the DataFrame's headers and rows
    """
df = df.replace(np.nan, '', regex=True)
for key, value in kwargs.items():
df[key] = value
headers = df.columns.values.tolist()
data = df.values.tolist()
return tablib.Dataset(*data, headers=headers)
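# Usage sketch: every row of the resulting dataset gains a constant 'source'
# column ('source' and 'demo' are arbitrary example values, not part of the API).
df_in = pd.DataFrame({'a': [1, 2], 'b': ['x', np.nan]})
ds = df_to_dataset(df_in, source='demo')
assert ds.headers == ['a', 'b', 'source']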
|
d434a7ffe72c6c588a2e9f75d1ad6f8e6e406f0a
| 28,874 |
import numpy as np
def zstandarization(value, distribution=None):
    """
    Apply a z-standardization (z-score) to `value`; `value` may itself be a
    distribution (a sequence of numbers).
    If `distribution` is given, `value` is standardized against that
    distribution's mean and standard deviation; otherwise against its own.
    """
value = np.array([float(i) for i in value])
if distribution:
return (np.array(value)-np.mean(np.array(distribution)))/np.std(np.array(distribution))
else:
return (np.array(value)-np.mean(value))/np.std(value)
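# Usage sketch: standardize a sample against itself, or a new value against a
# reference distribution.
sample = [1.0, 2.0, 3.0, 4.0]
z_self = zstandarization(sample)                     # zero mean, unit std
z_ref = zstandarization([5.0], distribution=sample)  # ~2.24 std above the mean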
|
a54ef1abcf5f49e0aba062712799fa2e3aff6a33
| 28,875 |
def _determine_default_project(project=None):
"""Determine default project ID explicitly or implicitly as fall-back.
In implicit case, supports three environments. In order of precedence, the
implicit environments are:
* GCLOUD_PROJECT environment variable
* Google App Engine application ID
* Google Compute Engine project ID (from metadata server)
:type project: string
:param project: Optional. The project name to use as default.
:rtype: string or ``NoneType``
:returns: Default project if it can be determined.
"""
if project is None:
project = _get_production_project()
if project is None:
project = _app_engine_id()
if project is None:
project = _compute_engine_id()
return project
|
6e19df4a15ab323a1858421c8f64d1d8be34d1f3
| 28,876 |
def collapse(intlist):
"""Collapse a list of int values of chars into the int they represent."""
f = ''
for i in intlist:
f += chr(i)
return int(f)
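# Usage sketch: the list holds the ASCII codes of digit characters.
assert collapse([ord('4'), ord('2')]) == 42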
|
7b92a456e78c8b6d8bbdc5af805b22728865ec63
| 28,877 |
import click
from typing import Union
from typing import Tuple
from typing import Dict
def _validate_environment_variable(
ctx: click.core.Context,
param: Union[click.core.Option, click.core.Parameter],
value: Tuple[str],
) -> Dict[str, str]:
"""
Validate that environment variables are set as expected.
"""
# We "use" variables to satisfy linting tools.
for _ in (param, ctx):
pass
env = {}
for definition in value:
try:
key, val = definition.split(sep='=', maxsplit=1)
except ValueError:
message = (
'"{definition}" does not match the format "<KEY>=<VALUE>".'
).format(definition=definition)
raise click.BadParameter(message=message)
env[key] = val
return env
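# Usage sketch (assumption): wiring the validator up as a click option callback
# on a hypothetical `run` command.
@click.command()
@click.option(
    '--env',
    multiple=True,
    callback=_validate_environment_variable,
    help='Set an environment variable as KEY=VALUE (repeatable).',
)
def run(env: Dict[str, str]) -> None:
    for key, val in env.items():
        click.echo('{key}={val}'.format(key=key, val=val))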
|
1e079a858325bf8ce1d185ad464d6c5c23c0c338
| 28,879 |
from typing import Tuple
def get_settings() -> Tuple[int, int]:
"""Gets some settings for the board."""
return board_size, num_ships
|
2c2d0b35b3f86b8b96148995c46e89fef1573b30
| 28,880 |
def run_sunset():
""" Sends a tweet about the sunset and captures image """
tweet_sunset()
return run_tweeter()
|
f8d5bbac93025c4a9f9a04969185aa94e3dc26c0
| 28,881 |
from pprint import pprint
def get_data():
"""Get the population data."""
# Construct population
pop = CosmicPopulation(n_srcs=SIZE, n_days=1, name='standard_candle')
pop.set_dist(model='sfr', z_max=2.5, H_0=67.74, W_m=0.3089, W_v=0.6911)
pop.set_dm_host(model='constant', value=100)
pop.set_dm_igm(model='ioka', slope=1000, std=None)
pop.set_dm_mw(model='ne2001')
pop.set_emission_range(low=10e6, high=10e9)
pop.set_lum(model='constant', value=1e36)
pop.set_w(model='constant', value=1.)
pop.set_si(model='constant', value=0)
pop.generate()
# Survey population
pops = {}
for b in BEAMPATTERNS:
pprint(f'Surveying with {b} beampattern')
n_s = 0
bp = b
if b.startswith('airy'):
bp, n_s = b.split('-')
n_s = int(n_s)
survey = Survey(name='perfect-small')
# Prevent beam from getting larger than the sky
survey.set_beam(model=bp, n_sidelobes=n_s, size=10)
surv_pop = SurveyPopulation(pop, survey)
print(surv_pop.source_rate)
pops[b] = surv_pop
return pops
|
038d0292a4d158531461d75ccba3e5baa8e50dbd
| 28,882 |
def tag_user(request, tag, username):
""" Display all `tag` snippets of `username` user """
user = get_object_or_404(User, username=username)
snippets = Snippet.objects.filter(author=user).filter(
tags__name__in=[tag, ]).all()
return render_to_response('tags/view.html', {
'tag': tag,
'snippets': snippets
}, context_instance=build_context(request))
|
c492f0ca1933ee3ee9875a1d0586a1dc8735c073
| 28,885 |
import warnings
def extract_keywords(keywlist_handle):
"""extract_keywords(keywlist_handle) -> list of keywords
Return the keywords from a keywlist.txt file.
"""
warnings.warn("Bio.SwissProt.KeyWList.extract_keywords is deprecated. Please use the function Bio.SwissProt.KeyWList.parse instead to parse the keywlist.txt file. In case of any problems, please contact the Biopython developers ([email protected]).",
DeprecationWarning)
if type(keywlist_handle) is not FileType and \
type(keywlist_handle) is not InstanceType:
raise ValueError("I expected a file handle or file-like object")
return ListParser().parse(keywlist_handle)
|
4679a2774eed9e40783662312b4b661c91e00f36
| 28,886 |
def automated_threshold_setting(image, mask_local_max):
"""Automatically set the optimal threshold to detect spots.
In order to make the thresholding robust, it should be applied to a
filtered image (bigfish.stack.log_filter for example). The optimal
threshold is selected based on the spots distribution. The latter should
have a kink discriminating a fast decreasing stage from a more stable one
(a plateau).
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
mask_local_max : np.ndarray, bool
Mask with shape (z, y, x) or (y, x) indicating the local peaks.
Returns
-------
optimal_threshold : int
Optimal threshold to discriminate spots from noisy blobs.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_array(mask_local_max,
ndim=[2, 3],
dtype=[bool])
# get threshold values we want to test
thresholds = _get_candidate_thresholds(image.ravel())
# get spots count and its logarithm
first_threshold = float(thresholds[0])
spots, mask_spots = spots_thresholding(
image, mask_local_max, first_threshold, remove_duplicate=False)
value_spots = image[mask_spots]
thresholds, count_spots = _get_spot_counts(thresholds, value_spots)
# select threshold where the kink of the distribution is located
optimal_threshold, _, _ = _get_breaking_point(thresholds, count_spots)
return optimal_threshold
|
7e461e225fe5a1a6c10f673f7096814e10a039ff
| 28,887 |
from typing import List
def _get_raw_test_commands(name: str) -> List[str]:
"""テストケースのinclude解決を行っていない状態の生のコマンドを取得する
Args:
name (str): テストケース名
Raises:
Exception: 取得に失敗した場合例外を送出します
Returns:
List[str]: テストコマンド配列
"""
testcase = _get_testcase_object(name)
return _convert_test_commands(testcase.testcase_data)
|
fcd70f4013dd26436bce48678c45ea0eccc1a8e6
| 28,888 |
import inspect
def extract_params(func, standard_args):
"""Return WhyNot parameters for user-defined function.
Performs error-checking to ensure parameters are disjoint from
standard arguments and all arguments to the function are either
standard arguments or parameters.
Parameters
----------
func: function
Possibly parameterized function.
standard_args: list
A list of possible arguments provided by the calling class itself
and shouldn't be treated as parameters for the function.
Returns
-------
params: `whynot.framework.ParameterCollection`
A collection of parameters for the func.
"""
if not callable(func):
msg = f"Trying to extract parameters from {func.__name__}, but not callable."
raise ValueError(msg)
# Extract parameters specified by the user via the @parameter decorator.
specified_params = ParameterCollection([])
if hasattr(func, PARAM_COLLECTION):
specified_params = getattr(func, PARAM_COLLECTION)
# Ensure standard_args is disjoint from the specified params.
for arg in standard_args:
if arg in specified_params:
msg = (
f"{arg} is both a parameter and a standard argument to {func.__name__}."
)
raise ValueError(msg)
# By construction, every element in specified_params
# must appear in the function signature, i.e. method_params.
method_args = inspect.signature(func).parameters
for arg in method_args:
if arg not in standard_args and arg not in specified_params:
msg = (
f"'{arg}' is in the signature of function {func.__name__}, "
f"but '{arg}' is not a standard argument or a parameter. "
f"Standard arguments: {', '.join(standard_args)}."
)
raise ValueError(msg)
return specified_params
|
2080bba7db140fbf4305f9138c39e944740c4c7a
| 28,889 |
def lark_to_float_value_node(tree: "Tree") -> "FloatValueNode":
"""
Creates and returns a FloatValueNode instance extracted from the parsing of
the tree instance.
:param tree: the Tree to parse in order to extract the proper node
:type tree: Tree
:return: a FloatValueNode instance extracted from the parsing of the tree
:rtype: FloatValueNode
"""
return FloatValueNode(
value=tree.children[0].value, location=lark_to_location_node(tree.meta)
)
|
34549e1615aa0b33c73de999ee43cbac047aa3ee
| 28,890 |
import numpy as np
def confusion_matrix(pred, gt, thres=0.5):
    """Calculate the confusion matrix (TP, FP, TN, FN) given a probability
    threshold in (0,1).
    """
TP = np.sum((gt == 1) & (pred > thres))
FP = np.sum((gt == 0) & (pred > thres))
TN = np.sum((gt == 0) & (pred <= thres))
FN = np.sum((gt == 1) & (pred <= thres))
return (TP, FP, TN, FN)
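# Usage sketch: two positives and two negatives, yielding one of each outcome
# at the default 0.5 threshold.
gt = np.array([1, 1, 0, 0])
pred = np.array([0.9, 0.4, 0.7, 0.1])
assert confusion_matrix(pred, gt) == (1, 1, 1, 1)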
|
da79757e54247b1fdcdcc5ddb39262386f1d2cbd
| 28,891 |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def do_heatmap(df):
    """
    Plot a lower-triangle heatmap of `df` (the upper triangle is masked).
    """
    mask = np.zeros_like(df, dtype=bool)
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(df.values.tolist(), yticklabels=df.columns, xticklabels=df.columns, vmin=-1, vmax=1, center=0,
cmap=cmap, linewidths=.1, mask = mask)
return plt.gcf()
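# Usage sketch (assumption): pandas is used here only to build a small
# correlation matrix to plot.
import pandas as pd
corr = pd.DataFrame(np.random.default_rng(0).normal(size=(100, 4)),
                    columns=list('abcd')).corr()
fig = do_heatmap(corr)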
|
99b95c8a2dac1781f84200f9ad8807b35466627b
| 28,894 |
from typing import List
from typing import Optional
from typing import Tuple
from typing import Dict
from typing import Any
import pandas as pd
from spacy.tokens import Doc, Span
def docs2omop(
docs: List[Doc],
extensions: Optional[List[str]] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Transforms a list of spaCy docs to a pair of OMOP tables.
Parameters
----------
docs : List[Doc]
List of documents to transform.
extensions : Optional[List[str]], optional
Extensions to keep, by default None
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
Pair of OMOP tables (`note` and `note_nlp`)
"""
df = pd.DataFrame(dict(doc=docs))
df["note_text"] = df.doc.apply(lambda doc: doc.text)
df["note_id"] = df.doc.apply(lambda doc: doc._.note_id)
df["note_datetime"] = df.doc.apply(lambda doc: doc._.note_datetime)
if df.note_id.isna().any():
df["note_id"] = range(len(df))
df["ents"] = df.doc.apply(lambda doc: list(doc.ents))
df["ents"] += df.doc.apply(lambda doc: list(doc.spans["discarded"]))
note = df[["note_id", "note_text", "note_datetime"]]
df = df[["note_id", "ents"]].explode("ents")
extensions = extensions or []
def ent2dict(
ent: Span,
) -> Dict[str, Any]:
d = dict(
start_char=ent.start_char,
end_char=ent.end_char,
note_nlp_source_value=ent.label_,
lexical_variant=ent.text,
# normalized_variant=ent._.normalized.text,
)
for ext in extensions:
d[ext] = getattr(ent._, ext)
return d
df["ents"] = df.ents.apply(ent2dict)
columns = [
"start_char",
"end_char",
"note_nlp_source_value",
"lexical_variant",
# "normalized_variant",
]
columns += extensions
df[columns] = df.ents.apply(pd.Series)
df["term_modifiers"] = ""
for i, ext in enumerate(extensions):
if i > 0:
df.term_modifiers += ";"
df.term_modifiers += ext + "=" + df[ext].astype(str)
df["note_nlp_id"] = range(len(df))
note_nlp = df[["note_nlp_id", "note_id"] + columns]
return note, note_nlp
|
d8c1325363aa84b5db00daa575d904ab586d79fa
| 28,895 |
def getWebVersion(d):
"""Get the version from the web of the catalog entry in d
Use the page at the url specified in d['version']['url'], and the regular
expression specified in d['version']['regex'] to find the latest version
number of the passed package. The d['version']['regexpos']'th match of the
regular expression is returned.
@param d The dictionary entry for a package, containing at least an entry
for 'version' that is a dictionary that contains a 'url', 'regex', and
'regexpos'
@return the version number matched by the regular expression and page
passed in.
"""
try:
ret = scrapePageDict(d['version'])
except KeyError:
        print('d did not contain a "version" entry')
        print('when calling getWebVersion(%s)' % d)
    except:
        print('unknown error running getWebVersion(%s)' % d)
raise
else:
return ret
|
7a25a09c9e28276f9d7f1218a801cbfe8fd868ed
| 28,896 |
def stdev_outliers_proxy(self, *args, **kwargs):
"""
Calls :meth:`.stdev_outliers` on each table in the TableSet.
"""
return self._proxy('stdev_outliers', *args, **kwargs)
|
dd929a3278a0893138b6af801bdd9b1263a5a332
| 28,897 |
def load_cells():
"""Load cell data.
This cell dataset contains cell boundaries of mouse osteosarcoma
(bone cancer) cells. The dlm8 cell line is derived from dunn and is more
aggressive as a cancer. The cells have been treated with one of three
treatments : control (no treatment), jasp (jasplakinolide)
and cytd (cytochalasin D). These are drugs which perturb the cytoskelet
of the cells.
Returns
-------
cells : list of 650 planar discrete curves
Each curve represents the boundary of a cell in counterclockwise order,
their lengths are not necessarily equal.
cell_lines : list of 650 strings
List of the cell lines of each cell (dlm8 or dunn).
treatments : list of 650 strings
List of the treatments given to each cell (control, cytd or jasp).
"""
with open(CELLS_PATH) as cells_file:
cells = cells_file.read().split("\n\n")
for i, cell in enumerate(cells):
cell = cell.split("\n")
curve = []
for point in cell:
coords = [int(coord) for coord in point.split()]
curve.append(coords)
cells[i] = gs.cast(gs.array(curve), gs.float32)
with open(CELL_LINES_PATH) as cell_lines_file:
cell_lines = cell_lines_file.read().split("\n")
with open(CELL_TREATMENTS_PATH) as treatments_file:
treatments = treatments_file.read().split("\n")
return cells, cell_lines, treatments
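# Usage sketch: load the dataset and inspect one sample.
cells, cell_lines, treatments = load_cells()
print(len(cells), cell_lines[0], treatments[0])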
|
2f0dc2aef62d01c863133e1d608b7e2afafea2c1
| 28,898 |
def approve_story(story: str):
"""
Moves a story from the pending file to the story file.
:param story: The story to approve.
:return:
"""
pending_data = load_pending_list()
story_to_approve = pending_data[story.lower()]
del pending_data[story.lower()]
update_pending_list(pending_data)
story_data = load_story_list()
story_data[story.lower()] = story_to_approve
update_story_list(story_data)
return f'Story "{story_name(story)}" has been approved.'
|
9f8c6f86a983637a4ab6574fa7acf3438d4ad1e3
| 28,899 |