content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
import importlib
def load_attr(str_full_module):
"""
Args:
- str_full_module: (str) corresponds to {module_name}.{attr}
Return: the loaded attribute from a module.
"""
if type(str_full_module) == str:
split_full = str_full_module.split(".")
str_module = ".".join(split_full[:-1])
str_attr = split_full[-1]
module = importlib.import_module(str_module)
return getattr(module, str_attr)
else:
return str_full_module
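# Hedged usage sketch (standard library only): a dotted path is resolved to the
# named attribute, while a non-string input is returned unchanged.
sqrt = load_attr("math.sqrt")  # imports "math", then getattr(math, "sqrt")
assert sqrt(9) == 3.0
assert load_attr(sqrt) is sqrt  # already a callable, passed straight through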
|
f96dd56c73745e76ccc9c48dda4ba8a6592ab54b
| 31,785 |
from typing import Dict
from typing import List
import math
def conv(node: NodeWrapper,
params: Dict[str, np.ndarray],
xmap: Dict[str, XLayer]) -> List[XLayer]:
"""ONNX Conv to XLayer Conv conversion function"""
logger.info("ONNX Conv -> XLayer Conv (+ BiasAdd)")
assert len(node.get_outputs()) == 1
name = node.get_outputs()[0]
bottoms = node.get_inputs()
node_attrs = node.get_attributes()
iX = xmap[bottoms[0]] # NCHW
_, in_c, in_h, in_w = iX.shapes
W_name = bottoms[1]
wX = xmap[W_name] # OIHW
B_name = bottoms[2] if len(bottoms) == 3 else None
bX = xmap[B_name] if len(bottoms) == 3 else None
auto_pad = node_attrs['auto_pad'] if 'auto_pad' in node_attrs\
else 'NOTSET'
dilations = node_attrs['dilations'] if 'dilations' in node_attrs\
else [1, 1]
dil_h, dil_w = dilations
groups = node_attrs['group'] if 'group' in node_attrs\
else 1
kernel_shape = node_attrs['kernel_shape'] if 'kernel_shape' in node_attrs\
else wX.shapes[2:]
kernel_h, kernel_w = kernel_shape
pads = node_attrs['pads'] if 'pads' in node_attrs\
else None
strides = node_attrs['strides'] if 'strides' in node_attrs\
else [1, 1]
stride_h, stride_w = strides
channels = wX.shapes[0]
assert wX.shapes[1] == in_c // groups
assert auto_pad == 'NOTSET' or pads is None
if (auto_pad == 'NOTSET' and pads is None) or auto_pad == 'VALID':
padding = [0, 0, 0, 0] # ht, hb, wl, wr
elif auto_pad in ["SAME_UPPER", "SAME_LOWER"]:
out_h, out_w = int(math.ceil(in_h / stride_h)), int(math.ceil(in_w / stride_w))
pad_h = (out_h - 1) * stride_h + (dil_h * (kernel_h - 1) + 1) - in_h
pad_w = (out_w - 1) * stride_w + (dil_w * (kernel_w - 1) + 1) - in_w
if auto_pad == "SAME_UPPER":
pad_ht, pad_hb = pad_h // 2, pad_h - (pad_h // 2)
pad_wl, pad_wr = pad_w // 2, pad_w - (pad_w // 2)
else:
pad_ht, pad_hb = pad_h - (pad_h // 2), pad_h // 2
pad_wl, pad_wr = pad_w - (pad_w // 2), pad_w // 2
padding = [pad_ht, pad_hb, pad_wl, pad_wr]
else:
assert len(pads) % 2 == 0
half = len(pads) // 2
padding = []
for i in range(half):
padding.extend([pads[i], pads[i+half]])
# Quant_info (optional)
vai_quant_in = node_attrs['vai_quant_in']\
if 'vai_quant_in' in node_attrs else []
vai_quant_out = node_attrs['vai_quant_out']\
if 'vai_quant_out' in node_attrs else []
vai_quant_weights = node_attrs['vai_quant_weights']\
if 'vai_quant_weights' in node_attrs else []
vai_quant_biases = node_attrs['vai_quant_biases']\
if 'vai_quant_biases' in node_attrs else []
vai_quant = node_attrs['vai_quant']\
if 'vai_quant' in node_attrs else []
conv_name = name if B_name is None else name + '_Conv'
X = px.ops.conv2d(
op_name=px.stringify(conv_name),
input_layer=iX,
weights_layer=wX,
kernel_size=kernel_shape,
strides=strides,
padding_hw=padding,
dilation=dilations,
groups=groups,
channels=channels,
data_layout='NCHW',
kernel_layout='OIHW',
vai_quant=vai_quant,
vai_quant_in=vai_quant_in,
vai_quant_out=vai_quant_out,
vai_quant_weights=vai_quant_weights,
vai_quant_biases=vai_quant_biases,
onnx_id=name
)
res = [X]
if B_name is not None:
bias_add_X = xlf.get_xop_factory_func('BiasAdd')(
op_name=px.stringify(name),
axis=1,
input_layer=X,
bias_layer=bX,
onnx_id=name
)
res.append(bias_add_X)
return res
|
7b004f41d103796ed01bc46e7dcff156171b35bd
| 31,786 |
def yices_bvconst_int32(n, x):
"""Conversion of an integer to a bitvector constant, returns NULL_TERM (-1) if there's an error.
bvconst_int32(n, x):
- n = number of bits
- x = value
The low-order bit of x is bit 0 of the constant.
- if n is less than 32, then the value of x is truncated to
n bits (i.e., only the n least significant bits of x are considered)
- if n is more than 32, then the value of x is sign-extended to
n bits.
Error report:
if n = 0
code = POS_INT_REQUIRED
badval = n
if n > YICES_MAX_BVSIZE
code = MAX_BVSIZE_EXCEEDED
badval = n.
"""
# let yices deal with int32_t excesses
if n > MAX_INT32_SIZE:
n = MAX_INT32_SIZE
return libyices.yices_bvconst_int32(n, x)
|
b676ea0ea5b25f90b60f2efd67af553d835daa9b
| 31,787 |
from scipy.special import comb
def combination(n, k):
"""
Binomial coefficient: n! / (k! * (n-k)!)
:param n:
:param k:
:return:
"""
return comb(n, k, exact=True)
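# Hedged usage sketch: exact integer binomial coefficients via scipy.special.comb.
assert combination(5, 2) == 10  # 5! / (2! * 3!)
assert combination(10, 0) == 1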
|
b87f9037decd765680e0e2d5b5dfea336e014b61
| 31,788 |
def create_carray(h5file_uri, type, shape):
"""Creates an empty chunked array given a file type and size.
h5file_uri - a uri to store the carray
type - an h5file type
shape - a tuple indicating rows/columns"""
h5file = tables.openFile(h5file_uri, mode='w')
root = h5file.root
return h5file.createCArray(
root, 'from_create_carray', type, shape=shape)
|
ca4c9605905a44b5f3027024f78cc855136472b0
| 31,790 |
def sort_dictionary_by_keys(input_dict):
"""
Sort the dictionary by keys in alphabetical order
"""
sorted_dict = {}
for key in sorted(input_dict.keys()):
sorted_dict[key] = input_dict[key]
return sorted_dict
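# Hedged usage sketch: on Python 3.7+ dicts preserve insertion order, so the
# returned dict iterates in alphabetical key order.
assert list(sort_dictionary_by_keys({"b": 2, "a": 1, "c": 3})) == ["a", "b", "c"]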
|
225df2c16d2b21740603c224319ad4b0eaa0899d
| 31,791 |
def sorted_instructions(binview):
"""
Return a sorted list of the instructions in the current viewport.
"""
addrs = []
instructions = []
for ii in binview.instructions:
if ii[1] not in addrs:
instructions.append(instr(ii))
addrs.append(ii[1])
del addrs
instructions.sort(key=lambda x: x.address)
return instructions
|
5f8602b80a73fc4b66bbb6d2e70070f2e9e35397
| 31,792 |
def quick_sort(seq):
"""
Recursive implementation of quicksort.
:param seq: any mutable collection of heterogeneous elements that can be
compared with one another.
:return: the collection with its elements sorted in ascending order.
Examples:
>>> quick_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> quick_sort([])
[]
>>> quick_sort([-2, -5, -45])
[-45, -5, -2]
"""
length = len(seq)
if length <= 1:
return seq
else:
# The last element is used as the pivot.
pivot = seq.pop()
# lesser collects the elements that are less than or equal to the pivot.
# greater collects the elements that are greater than the pivot.
greater, lesser = [], []
for element in seq:
if element > pivot:
greater.append(element)
else:
lesser.append(element)
# The sort is called recursively on lesser and greater, and the parts are
# then joined into a single collection with the pivot in between.
return quick_sort(lesser) + [pivot] + quick_sort(greater)
|
46b56b5d29ca31a872e1805b66f4529a8bf48c6b
| 31,793 |
def _geocode(address):
"""
Like :func:`geocode` except returns the raw data instead.
:param str address: A location (e.g., "Newark, DE") somewhere in the United States
:returns: str
"""
key = _geocode_request(address)
result = _get(key) if _CONNECTED else _lookup(key)
if _CONNECTED and _EDITABLE:
_add_to_cache(key, result)
return result
|
f6cee8c606c5fe014c6c67787e0a9bcee70a0281
| 31,794 |
def easeOutBack(n, s=1.70158):
"""A tween function that overshoots the destination a little and then backs into the destination.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
s (float): The amount of overshoot; larger values overshoot farther. Defaults to 1.70158.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
"""
_checkRange(n)
n = n - 1
return n * n * ((s + 1) * n + s) + 1
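# Hedged behaviour sketch: evaluating the same polynomial directly (bypassing the
# _checkRange guard, which is assumed to only validate 0.0 <= n <= 1.0) shows the
# endpoints land exactly on 0 and 1 while mid-range samples overshoot past 1.
s = 1.70158
curve = [((n - 1) ** 2) * ((s + 1) * (n - 1) + s) + 1 for n in (0.0, 0.7, 1.0)]
# curve is approximately [0.0, 1.08, 1.0] -- the 0.7 sample exceeds 1.0 before backing in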
|
bc5a0e34c2f7a16492c0456d8c28725888c6822c
| 31,795 |
def normalize_query_result(result, sort=True):
"""
Post-process query result to generate a simple, nested list.
:param result: A QueryResult object.
:param sort: if True (default) rows will be sorted.
:return: A list of lists of RDF values.
"""
normalized = [[row[i] for i in range(len(row))] for row in result]
return sorted(normalized) if sort else normalized
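# Hedged usage sketch: QueryResult rows only need len() and indexing, so plain
# tuples stand in here; each row is flattened to a list and rows are sorted.
rows = [("b", 2), ("a", 1)]
assert normalize_query_result(rows) == [["a", 1], ["b", 2]]
assert normalize_query_result(rows, sort=False) == [["b", 2], ["a", 1]]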
|
1df57ef889be041c41593766e1ce3cdd4ada7f66
| 31,796 |
from typing import List
def count_jobpairs(buildpairs: List) -> int:
"""
:param buildpairs: A list of build pairs.
:return: The number of job pairs in `buildpairs`.
"""
counts = [len(bp['jobpairs']) for bp in buildpairs]
return sum(counts)
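# Hedged usage sketch with minimal stand-in build pairs.
buildpairs = [{"jobpairs": [1, 2]}, {"jobpairs": []}, {"jobpairs": [3]}]
assert count_jobpairs(buildpairs) == 3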
|
30c345698400fd134456abcf7331ca2ebbfec10f
| 31,797 |
def unravel_params(nn_params, input_layer_size, hidden_layer_size,
num_labels, n_hidden_layers=1):
"""Unravels flattened array into list of weight matrices
:param nn_params: Row vector of model's parameters.
:type nn_params: numpy.array
:param input_layer_size: Number of units in the input layer.
:type input_layer_size: int
:param hidden_layer_size: Number of units in a hidden layer.
:type hidden_layer_size: int
:param num_labels: Number of classes in multiclass classification.
:type num_labels: int
:param n_hidden_layers: Number of hidden layers in network.
:type n_hidden_layers: int
:returns: array with model's weight matrices.
:rtype: numpy.array(numpy.array)
"""
input_layer_n_units = hidden_layer_size * (input_layer_size + 1)
hidden_layer_n_units = hidden_layer_size * (hidden_layer_size + 1)
theta = empty((n_hidden_layers + 1), dtype=object)
# input layer to hidden layer
theta[0] = nn_params[0:input_layer_n_units]
theta[0] = reshape(theta[0], (hidden_layer_size, (input_layer_size + 1)))
# hidden layer to hidden layer
for i in range(1, n_hidden_layers):
start = input_layer_n_units + (i - 1) * hidden_layer_n_units
end = input_layer_n_units + i * hidden_layer_n_units
theta[i] = nn_params[start:end]
theta[i] = reshape(
theta[i], (hidden_layer_size, (hidden_layer_size + 1)))
# hidden layer to output layer
start = input_layer_n_units + (n_hidden_layers - 1) * hidden_layer_n_units
theta[n_hidden_layers] = nn_params[start:]
theta[n_hidden_layers] = reshape(theta[n_hidden_layers],
(num_labels, (hidden_layer_size + 1)))
return theta
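# Hedged usage sketch (assumes `from numpy import empty, reshape`, which the
# function above relies on): 2 inputs, 3 hidden units, 1 hidden layer and 2 labels
# give a 3x3 input-to-hidden matrix and a 2x4 hidden-to-output matrix.
import numpy as np
params = np.arange(3 * 3 + 2 * 4, dtype=float)
theta = unravel_params(params, input_layer_size=2, hidden_layer_size=3, num_labels=2)
assert theta[0].shape == (3, 3) and theta[1].shape == (2, 4)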
|
40703668ad74e4f6dbaf5c9c291da0c1c9528f60
| 31,798 |
def is_op_stack_var(ea, index):
""" check if operand is a stack variable """
return idaapi.is_stkvar(idaapi.get_flags(ea), index)
|
b041cc56d8a0f772223b96cf5fa8bd6e338c777f
| 31,799 |
def get_circles():
"""
Create 3 images each containing a circle at a different position.
Returns:
List([numpy array,]) of 3 circle images
"""
circles = []
x, y = np.meshgrid(range(256), range(256))
for d in range(30, 390, 120):
img = np.zeros((256, 256), dtype=np.uint8)
c = (np.sin(np.deg2rad(d)) * 50 + 128, -np.cos(np.deg2rad(d)) * 50 + 128)
r = 70
y_ = y - c[0]
x_ = x - c[1]
img[np.sqrt(y_**2 + x_**2) < r] = 255
circles.append(img)
return circles
|
4068c5eb028fd728596a1eddb7aa657993cc140b
| 31,800 |
def load_sequences( fasta_file ):
"""! @brief load candidate gene IDs from file """
sequences = {}
assembly_seq_order = []
with open( fasta_file ) as f:
header = f.readline()[1:].strip()
if " " in header:
header = header.split(' ')[0]
seq = []
line = f.readline()
while line:
if line[0] == '>':
sequences.update( { header: "".join( seq ) } )
assembly_seq_order.append( header )
header = line.strip()[1:]
if " " in header:
header = header.split(' ')[0]
seq = []
else:
seq.append( line.strip() )
line = f.readline()
sequences.update( { header: "".join( seq ) } )
assembly_seq_order.append( header )
return sequences, assembly_seq_order
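# Hedged usage sketch: headers are trimmed at the first space and sequence lines
# are concatenated per record; a throwaway temp file stands in for a real FASTA.
import os, tempfile
fd, path = tempfile.mkstemp(suffix=".fasta")
os.write(fd, b">seq1 some description\nACGT\nACGT\n>seq2\nTTTT\n")
os.close(fd)
seqs, order = load_sequences(path)
assert seqs == {"seq1": "ACGTACGT", "seq2": "TTTT"} and order == ["seq1", "seq2"]
os.remove(path)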
|
7dd6cb97f457fee371556ab49a95ad12d52ca8dd
| 31,801 |
def compute_distance_fields(params, canvas_size, df=distance_to_cuboids):
"""Compute distance fields of size (canvas_size+2)^3 to specified primitives.
params -- [b, *]
canvas_size
df -- distance_to_*
"""
grid_pts = th.stack(th.meshgrid([th.linspace(-1/(canvas_size-1),
1+1/(canvas_size-1),
canvas_size+2)]*3), dim=-1).permute(0, 2, 1, 3).reshape(-1, 3)
if params.is_cuda:
grid_pts = grid_pts.cuda()
return df(grid_pts, params).view(params.size(0), params.size(1), 66, 66, 66)
|
4d6ab66f0d540e20b7f4b8782b9cce24e989be10
| 31,802 |
def view_user_posts(request, pk=None):
"""View your own posts or others posts"""
if pk is None:
pk = request.user.pk
user = get_object_or_404(User, pk=pk)
return render(request, 'home/view_user_posts.html',
{"pk": user.username, "user": user})
|
20be478cc98c99afeace4188549ec3728fa4221a
| 31,803 |
def find_all_strobogrammatic_numbers(n: int):
""" Returns all strobogrammatic numbers for a given number of digits.
Args:
int: number of digits
Returns:
A list of string with strobogrammatic numbers.
"""
result = numdef(n, n)
return result
|
9923386d85de5110044488cf3e491d6602755d37
| 31,804 |
def get_deploy_size_bytes(deploy: Deploy) -> int:
"""Returns size of a deploy in bytes.
:deploy: Deploy to be written in JSON format.
:returns: Size of deploy in bytes.
"""
size: int = len(deploy.hash)
for approval in deploy.approvals:
size += len(approval.signature)
size += len(approval.signer)
size += len(serialisation.deploy_to_bytes(deploy.header))
size += len(serialisation.deploy_to_bytes(
factory.create_deploy_body(deploy.payment, deploy.session))
)
return size
|
791c6ea249a9cdec39f4f02b9d88215868df6ebf
| 31,805 |
from typing import Any
from typing import get_args
def make_hetero_tuple_unstructure_fn(cl: Any, converter, unstructure_to=None):
"""Generate a specialized unstructure function for a heterogenous tuple."""
fn_name = "unstructure_tuple"
type_args = get_args(cl)
# We can do the dispatch here and now.
handlers = [
converter._unstructure_func.dispatch(type_arg)
for type_arg in type_args
]
globs = {f"__cattr_u_{i}": h for i, h in enumerate(handlers)}
if unstructure_to is not tuple:
globs["__cattr_seq_cl"] = unstructure_to or cl
lines = []
lines.append(f"def {fn_name}(tup):")
if unstructure_to is not tuple:
lines.append(" res = __cattr_seq_cl((")
else:
lines.append(" res = (")
for i in range(len(handlers)):
if handlers[i] == converter._unstructure_identity:
lines.append(f" tup[{i}],")
else:
lines.append(f" __cattr_u_{i}(tup[{i}]),")
if unstructure_to is not tuple:
lines.append(" ))")
else:
lines.append(" )")
total_lines = lines + [" return res"]
eval(compile("\n".join(total_lines), "", "exec"), globs)
fn = globs[fn_name]
return fn
|
a1ffa13bcf6488a79c6aacafd6f1e12112f99bb2
| 31,807 |
from typing import Optional
from typing import Mapping
def get_dps(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDpsResult:
"""
Use this data source to access information about an existing IotHub Device Provisioning Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.iot.get_dps(name="iot_hub_dps_test",
resource_group_name="iothub_dps_rg")
```
:param str name: Specifies the name of the Iot Device Provisioning Service resource.
:param str resource_group_name: The name of the resource group under which the Iot Device Provisioning Service is located in.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:iot/getDps:getDps', __args__, opts=opts, typ=GetDpsResult).value
return AwaitableGetDpsResult(
allocation_policy=__ret__.allocation_policy,
device_provisioning_host_name=__ret__.device_provisioning_host_name,
id=__ret__.id,
id_scope=__ret__.id_scope,
location=__ret__.location,
name=__ret__.name,
resource_group_name=__ret__.resource_group_name,
service_operations_host_name=__ret__.service_operations_host_name,
tags=__ret__.tags)
|
cd258d299319c794fcb97a123b1f6265d8405d1c
| 31,808 |
from cacao_accounting.database import Modulos
from typing import Union
def obtener_id_modulo_por_nombre(modulo: Union[str, None]) -> Union[str, None]:
"""Devuelve el UUID de un modulo por su nombre."""
if modulo:
MODULO = Modulos.query.filter_by(modulo=modulo).first()
return MODULO.id
else:
return None
|
2b66b7dafbf45c251717b673bc75db1bf2b99683
| 31,809 |
def qg8_graph_get_chunk(graph: qg8_graph, idx: int):
"""
Get a chunk from the graph by its index, given as the second argument `idx`.
"""
if not isinstance(graph, qg8_graph):
raise TypeError("Argument is not a qg8_graph")
if idx > len(graph.chunks) - 1:
raise ValueError("Index exceeds the number of chunks in the graph")
return graph.chunks[idx]
|
8000d04a0e657842b1c3a5e0510ed81e26cffbde
| 31,810 |
def InterruptStartForwardForVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartForwardForVector(builder, numElems)
|
84cb4ffafc876554dfaded6c20bc08a00edc41ba
| 31,811 |
def build_network(
operations,
cwd=None,
rescheduled=None,
endured=None,
parallel=None,
marshalled=None,
node_props=None,
renamer=None,
excludes=None,
):
"""
The :term:`network` factory that does :term:`operation merging` before constructing it.
:param nest:
see same-named param in :func:`.compose`
"""
kw = {
k: v
for k, v in locals().items()
if v is not None and k not in ("operations", "excludes")
}
def proc_op(op, parent=None):
"""clone FuncOperation with certain props changed"""
## Convey any node-props specified in the pipeline here
# to all sub-operations.
#
if kw:
op_kw = kw.copy()
if node_props:
op_kw["node_props"] = {**op.node_props, **node_props}
if callable(renamer):
def parent_wrapper(ren_args: RenArgs) -> str:
# Provide RenArgs.parent.
return renamer(ren_args._replace(parent=parent))
op_kw["renamer"] = parent_wrapper
op = op.withset(**op_kw)
## Last minute checks, couldn't check earlier due to builder pattern.
#
if hasattr(op, "fn"):
op.validate_fn_name()
if not op.provides:
raise TypeError(f"`provides` must not be empty!")
return op
merge_set = iset()  # Preserve given node order.
for op in operations:
if isinstance(op, Pipeline):
merge_set.update(proc_op(s, op) for s in op.ops)
else:
merge_set.add(proc_op(op))
if excludes is not None:
excludes = {op for op in merge_set if op in asset(excludes, "excludes")}
if excludes:
merge_set = [op for op in merge_set if op not in excludes]
log.info("Compose excluded %i operations %s.", len(excludes), excludes)
assert all(bool(n) for n in merge_set)
from .planning import Network  # Imported here not to affect locals() at the top.
return Network(*merge_set)
|
f44ae5b68c7f2feb35b95b7e97d3ac5947e42933
| 31,812 |
def docker_volume_exist(volume):
"""Check if the docker-volume exists."""
cmd = "docker volume inspect {0}".format(volume)
status = execute_subprocess(cmd)
return status == 0
|
d96269dad83d594ae1c2eb5b04db1813699369e7
| 31,814 |
def get_embed_similarity(embed1, embed2, sim_measure="euclidean", num_top=None):
"""
Score alignments based on embeddings of two graphs
"""
if embed2 is None:
embed2 = embed1
if num_top is not None and num_top != 0: # KD tree with only top similarities computed
kd_sim = kd_align(embed1, embed2, distance_metric=sim_measure, num_top=num_top)
return kd_sim
# All pairwise distance computation
if sim_measure == "cosine":
similarity_matrix = sklearn.metrics.pairwise.cosine_similarity(embed1, embed2)
else:
similarity_matrix = sklearn.metrics.pairwise.euclidean_distances(embed1, embed2)
similarity_matrix = np.exp(-similarity_matrix)
return similarity_matrix
|
3f33f64f4dab3da42076956e083e7bd29e2909b1
| 31,815 |
from typing import get_args
def preprocess_meta_data():
""" preprocess the config for specific run:
1. reads command line arguments
2. updates the config file and set gpu config
3. configure gpu settings
4. define logger.
"""
args = get_args()
config = read_config(args)
gpu_init()
set_logger_and_tracker(config)
save_scripts(config)
return config
|
c7f4202fe3ccece2934999376af428ff5a72a164
| 31,818 |
def calc_water(scenario, years, days_in_year):
"""Calculate Water costs Function
Args:
scenario (object): The farm scenario
years (int): The no. of years the simulation will analyse
days_in_year (float): The number of days in a year
Returns:
cogs_water (list): Cost of Goods Sold expenditure on Water as a time series for each year
water_consumption (list): The amount of water consumed each year
"""
water_consumption = [0]
for y in range(years+1):
if y == 1:
water_consumption.append(scenario.system_quantity * 0.95 * days_in_year + (1900*12))
elif y > 1:
water_consumption.append((scenario.system_quantity * 0.95 * days_in_year + (1900*12)) * scenario.growing_area_mulitplier)
cogs_water = [i * scenario.water_price for i in water_consumption]
return cogs_water, water_consumption
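# Hedged usage sketch: types.SimpleNamespace stands in for the scenario object,
# using the attribute names (including the original spelling
# "growing_area_mulitplier") that the function above reads.
from types import SimpleNamespace
scenario = SimpleNamespace(system_quantity=100, growing_area_mulitplier=1.5, water_price=0.002)
cogs_water, water = calc_water(scenario, years=2, days_in_year=365)
assert len(water) == 3 and cogs_water[0] == 0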
|
ed23060e64c928a545897edef008b8b020d84d3c
| 31,819 |
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
|
51205d4fd518a1274f6ebc84916c0df968dc1eb2
| 31,820 |
def _check_template(hass, value_template):
""" Checks if result of template is true """
try:
value = template.render(hass, value_template, {})
except TemplateError:
_LOGGER.exception('Error parsing template')
return False
return value.lower() == 'true'
|
9de8bc9207a17cdb9902cd0d804a3e33531a93fb
| 31,821 |
def _snr_single_region(spectrum, region=None):
"""
Calculate the mean S/N of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object over which the signal to noise ratio will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the SNR.
Returns
-------
snr : `~astropy.units.Quantity` or list (based on region input)
Signal to noise ratio of the spectrum or within the regions
Notes
-----
This is a helper function for the above `snr()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
if hasattr(spectrum, 'mask') and spectrum.mask is not None:
flux = calc_spectrum.flux[~spectrum.mask]
uncertainty = calc_spectrum.uncertainty.quantity[~spectrum.mask]
else:
flux = calc_spectrum.flux
uncertainty = calc_spectrum.uncertainty.quantity
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.mean(flux / uncertainty, axis=-1)
|
1a87cad818c17aad3f098731d8a37d809176b92b
| 31,822 |
def squared_error(
prediction: chex.Array,
targets: chex.Array,
weight: float = 1.0,
# register_loss: bool = True,
) -> chex.Array:
"""Squared error loss."""
if prediction.shape != targets.shape:
raise ValueError("prediction and targets should have the same shape.")
# if register_loss:
kfac_jax.register_squared_error_loss(prediction, targets, weight)
return weight * jnp.sum(jnp.square(prediction - targets), axis=-1)
|
bf078eab1fd0578b8f28a2cdcef4a325ed3ef18a
| 31,824 |
def add_vqe_exact(params):
"""Add the ground states to the dict"""
ex = run_exact(params["operator"])
vq = run_vqe(params["operator"])
return {**params, **vq, **ex}
|
548f347af1aff46b087d7abb6936a0fdf5556ca1
| 31,825 |
def post_demo(module_id):
"""
`validation` is a data-validation decorator similar to WTForms; the rules live in a YAML file so they are easy to configure and read.
The return value is customized in tflask; a plain dict is used here to keep things explicit (readability is not sacrificed just to save a few words).
:param module_id:
:type module_id:
:return:
:rtype:
"""
post_form = parse_json_formdict('module_update')
code = update_module(module_id, post_form)
return {'code': code}
|
03c644cb4ea31a1e9c7bb0efea6a8e7194d12932
| 31,826 |
import logging
def local_angle_2D(points, times=None):
"""
Calculates 2-dimensional absolute step angles in radians where step angle
is deviation from a straight path when going from one time point to the next
Parameters
----------
points: np.ndarray
Array of track points in matrix coordinates (origin in TL, 0th coord
increases going downward, 1st coord increases going rightward)
times: iterable
Iterable containing time when each point was measured. If not None, used
to check for valid time intervals in autocorrelation calculation.
Returns
-------
features: dict
Dictionary of features to add into the track's feature dict
Includes:
step_angle_mean: Mean angle deviation from straight (in radians) of each step
step_angle_max: Maximum ""
step_angle_stdev: Standard deviation of ""
step_angle_autoCorr_lag1: Autocorrelation at 15 frame of ""
step_angle_autoCorr_lag2: Autocorrelation at 30 frame of ""
"""
point_diffs = np.diff(points, axis=0)
# Convert diffs to Cartesian X,Y coords (X increases going rightward, Y increases upwards)
diff_x = point_diffs[:, 1]
diff_y = -1 * point_diffs[:, 0]
# Get angle of each particle step
vec_angles = np.arctan2(diff_y, diff_x)
# Convert from [-π, π] to [0, 2π] so diff calc works properly
vec_angles = np.mod(vec_angles, 2 * np.pi)
delta_angles = np.diff(vec_angles)
# Convert back to [-π, π] and get absolute difference
# Using [-π, π] interval ensures we pick the smaller of the two angle differences (i.e., diff will not be > π)
delta_angles = np.abs(np.mod(delta_angles + np.pi, 2 * np.pi) - np.pi)
if len(delta_angles) > 0:
max_ang = np.max(delta_angles)
mean_ang = np.mean(delta_angles)
stdev_ang = np.std(delta_angles)
else:
logging.warning("Track is not long enough to extract step angle features.")
max_ang = np.inf
mean_ang = np.inf
stdev_ang = np.inf
# Autocorrelation
time_diffs = None
if times is not None:
time_diffs = np.diff(times)
ac_lag1, ac_lag2 = utils.normed_autocorr(delta_angles, lags=[15, 30],
time_intervals=time_diffs)
# Save the features
features = {
'step_angle_mean' : mean_ang,
'step_angle_max' : max_ang,
'step_angle_stdev' : stdev_ang,
'step_angle_autoCorr_lag1': ac_lag1,
'step_angle_autoCorr_lag2': ac_lag2
}
return features
|
d774061bf164bb3b835094fff5d4a0d20d449829
| 31,828 |
def calculate_geometry_stats(df):
"""Calculate total network miles, free-flowing miles (not in waterbodies),
free-flowing unaltered miles (not in waterbodies, not altered), total
perennial miles, total free-flowing unaltered perennial miles, and count of
segments.
Parameters
----------
df : DataFrame
must have length, waterbody, altered, and intermittent columns, and be indexed on
networkID
Returns
-------
DataFrame
contains total_miles, free_miles, *_miles, segments
"""
# total lengths used for upstream network
total_length = df["length"].groupby(level=0).sum().rename("total")
perennial_length = (
df.loc[~df.intermittent, "length"].groupby(level=0).sum().rename("perennial")
)
intermittent_length = (
df.loc[df.intermittent, "length"].groupby(level=0).sum().rename("intermittent")
)
altered_length = (
df.loc[df.altered, "length"].groupby(level=0).sum().rename("altered")
)
unaltered_length = (
df.loc[~df.altered, "length"].groupby(level=0).sum().rename("unaltered")
)
perennial_unaltered_length = (
df.loc[~(df.intermittent | df.altered), "length"]
.groupby(level=0)
.sum()
.rename("perennial_unaltered")
)
# free lengths used for downstream network; these deduct lengths in waterbodies
free_length = df.loc[~df.waterbody, "length"].groupby(level=0).sum().rename("free")
free_perennial = (
df.loc[~(df.intermittent | df.waterbody), "length"]
.groupby(level=0)
.sum()
.rename("free_perennial")
)
free_intermittent = (
df.loc[df.intermittent & (~df.waterbody), "length"]
.groupby(level=0)
.sum()
.rename("free_intermittent")
)
free_altered_length = (
df.loc[df.altered & (~df.waterbody), "length"]
.groupby(level=0)
.sum()
.rename("free_altered")
)
free_unaltered_length = (
df.loc[~(df.waterbody | df.altered), "length"]
.groupby(level=0)
.sum()
.rename("free_unaltered")
)
free_perennial_unaltered = (
df.loc[~(df.intermittent | df.waterbody | df.altered), "length"]
.groupby(level=0)
.sum()
.rename("free_perennial_unaltered")
)
lengths = (
pd.DataFrame(total_length)
.join(perennial_length)
.join(intermittent_length)
.join(altered_length)
.join(unaltered_length)
.join(perennial_unaltered_length)
.join(free_length)
.join(free_perennial)
.join(free_intermittent)
.join(free_altered_length)
.join(free_unaltered_length)
.join(free_perennial_unaltered)
.fillna(0)
* METERS_TO_MILES
).astype("float32")
lengths.columns = [f"{c}_miles" for c in lengths.columns]
# calculate percent unaltered
lengths["pct_unaltered"] = (
100 * (lengths.unaltered_miles / lengths.total_miles)
).astype("float32")
# Note: if there are no perennial miles, this should be 0
lengths["pct_perennial_unaltered"] = 0
lengths.loc[lengths.perennial_miles > 0, "pct_perennial_unaltered"] = 100 * (
lengths.perennial_unaltered_miles / lengths.perennial_miles
)
lengths["pct_perennial_unaltered"] = lengths.pct_perennial_unaltered.astype(
"float32"
)
segments = df.groupby(level=0).size().astype("uint32").rename("segments")
return lengths.join(segments)
|
0e5a32aab297d23fcf77eec7649c662e03ee9bb5
| 31,829 |
def unshelve(ui, repo, *shelved, **opts):
"""restore a shelved change to the working directory
This command accepts an optional name of a shelved change to
restore. If none is given, the most recent shelved change is used.
If a shelved change is applied successfully, the bundle that
contains the shelved changes is deleted afterwards.
Since you can restore a shelved change on top of an arbitrary
commit, it is possible that unshelving will result in a conflict
between your changes and the commits you are unshelving onto. If
this occurs, you must resolve the conflict, then use
``--continue`` to complete the unshelve operation. (The bundle
will not be deleted until you successfully complete the unshelve.)
(Alternatively, you can use ``--abort`` to abandon an unshelve
that causes a conflict. This reverts the unshelved changes, and
does not delete the bundle.)
"""
abortf = opts['abort']
continuef = opts['continue']
if not abortf and not continuef:
cmdutil.checkunfinished(repo)
if abortf or continuef:
if abortf and continuef:
raise util.Abort(_('cannot use both abort and continue'))
if shelved:
raise util.Abort(_('cannot combine abort/continue with '
'naming a shelved change'))
try:
state = shelvedstate.load(repo)
except IOError, err:
if err.errno != errno.ENOENT:
raise
raise util.Abort(_('no unshelve operation underway'))
if abortf:
return unshelveabort(ui, repo, state, opts)
elif continuef:
return unshelvecontinue(ui, repo, state, opts)
elif len(shelved) > 1:
raise util.Abort(_('can only unshelve one change at a time'))
elif not shelved:
shelved = listshelves(repo)
if not shelved:
raise util.Abort(_('no shelved changes to apply!'))
basename = util.split(shelved[0][1])[1]
ui.status(_("unshelving change '%s'\n") % basename)
else:
basename = shelved[0]
if not shelvedfile(repo, basename, 'files').exists():
raise util.Abort(_("shelved change '%s' not found") % basename)
oldquiet = ui.quiet
wlock = lock = tr = None
try:
lock = repo.lock()
wlock = repo.wlock()
tr = repo.transaction('unshelve', report=lambda x: None)
oldtiprev = len(repo)
pctx = repo['.']
tmpwctx = pctx
# The goal is to have a commit structure like so:
# ...-> pctx -> tmpwctx -> shelvectx
# where tmpwctx is an optional commit with the user's pending changes
# and shelvectx is the unshelved changes. Then we merge it all down
# to the original pctx.
# Store pending changes in a commit
m, a, r, d = repo.status()[:4]
if m or a or r or d:
ui.status(_("temporarily committing pending changes "
"(restore with 'hg unshelve --abort')\n"))
def commitfunc(ui, repo, message, match, opts):
hasmq = util.safehasattr(repo, 'mq')
if hasmq:
saved, repo.mq.checkapplied = repo.mq.checkapplied, False
try:
return repo.commit(message, 'shelve@localhost',
opts.get('date'), match)
finally:
if hasmq:
repo.mq.checkapplied = saved
tempopts = {}
tempopts['message'] = "pending changes temporary commit"
tempopts['date'] = opts.get('date')
ui.quiet = True
node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
tmpwctx = repo[node]
ui.quiet = True
shelvedfile(repo, basename, 'hg').applybundle()
nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
phases.retractboundary(repo, phases.secret, nodes)
ui.quiet = oldquiet
shelvectx = repo['tip']
# If the shelve is not immediately on top of the commit
# we'll be merging with, rebase it to be on top.
if tmpwctx.node() != shelvectx.parents()[0].node():
ui.status(_('rebasing shelved changes\n'))
try:
rebase.rebase(ui, repo, **{
'rev' : [shelvectx.rev()],
'dest' : str(tmpwctx.rev()),
'keep' : True,
})
except error.InterventionRequired:
tr.close()
stripnodes = [repo.changelog.node(rev)
for rev in xrange(oldtiprev, len(repo))]
shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)
util.rename(repo.join('rebasestate'),
repo.join('unshelverebasestate'))
raise error.InterventionRequired(
_("unresolved conflicts (see 'hg resolve', then "
"'hg unshelve --continue')"))
# refresh ctx after rebase completes
shelvectx = repo['tip']
if not shelvectx in tmpwctx.children():
# rebase was a no-op, so it produced no child commit
shelvectx = tmpwctx
mergefiles(ui, repo, pctx, shelvectx)
shelvedstate.clear(repo)
# The transaction aborting will strip all the commits for us,
# but it doesn't update the inmemory structures, so addchangegroup
# hooks still fire and try to operate on the missing commits.
# Clean up manually to prevent this.
repo.unfiltered().changelog.strip(oldtiprev, tr)
unshelvecleanup(ui, repo, basename, opts)
finally:
ui.quiet = oldquiet
if tr:
tr.release()
lockmod.release(lock, wlock)
|
6656db08227788f216620651727213ad9eab9800
| 31,830 |
def SyntaxSpec(lang_id=0):
"""Syntax Specifications
@param lang_id: used for selecting a specific subset of syntax specs
"""
if lang_id == synglob.ID_LANG_JAVA:
return SYNTAX_ITEMS
else:
return list()
|
0de3ec2386efedc1472a977c1338759c61d70e8f
| 31,831 |
from bids import BIDSLayout
from ..utils import collect_participants
from nipype import logging as nlogging, config as ncfg
from ..__about__ import __version__
from ..workflow.base import init_xcpabcd_wf
from niworkflows.utils.misc import clean_directory
from yaml import load as loadyml
import uuid
from pathlib import Path
def build_workflow(opts, retval):
"""
Create the Nipype Workflow that supports the whole execution
graph, given the inputs.
All the checks and the construction of the workflow are done
inside this function that has pickleable inputs and output
dictionary (``retval``) to allow isolation using a
``multiprocessing.Process`` that allows fmriprep to enforce
a hard-limited memory-scope.
"""
build_log = nlogging.getLogger('nipype.workflow')
INIT_MSG = """
Running xcp_abcd version {version}:
* fMRIPrep directory path: {fmriprep_dir}.
* Participant list: {subject_list}.
* Run identifier: {uuid}.
""".format
fmriprep_dir = opts.fmriprep_dir.resolve()
output_dir = opts.output_dir.resolve()
work_dir = opts.work_dir.resolve()
if opts.clean_workdir:
build_log.info("Clearing previous xcp_abcd working directory: %s" % work_dir)
if not clean_directory(work_dir):
build_log.warning("Could not clear all contents of working directory: %s" % work_dir)
retval['return_code'] = 1
retval['workflow'] = None
retval['fmriprep_dir'] = str(fmriprep_dir)
retval['output_dir'] = str(output_dir)
retval['work_dir'] = str(work_dir)
if output_dir == fmriprep_dir:
build_log.error(
'The selected output folder is the same as the input fmriprep output. '
'Please modify the output path (suggestion: %s).',
fmriprep_dir / 'derivatives' / ('xcp_abcd-%s' % __version__.split('+')[0]))
retval['return_code'] = 1
return retval
if fmriprep_dir in work_dir.parents:
build_log.error(
'The selected working directory is a subdirectory of fmriprep directory. '
'Please modify the output path.')
retval['return_code'] = 1
return retval
# Set up some instrumental utilities
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
retval['run_uuid'] = run_uuid
# First check that fmriprep_dir looks like a BIDS folder
layout = BIDSLayout(str(fmriprep_dir),validate=False, derivatives=True)
subject_list = collect_participants(
layout, participant_label=opts.participant_label)
retval['subject_list'] = subject_list
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
nthreads = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nthreads is None or opts.nthreads is not None:
nthreads = opts.nthreads
if nthreads is None or nthreads < 1:
nthreads = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nthreads
if opts.mem_gb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb
omp_nthreads = opts.omp_nthreads
if omp_nthreads == 0:
omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)
if 1 < nthreads < omp_nthreads:
build_log.warning(
'Per-process threads (--omp-nthreads=%d) exceed total '
'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
retval['plugin_settings'] = plugin_settings
# Set up directories
log_dir = output_dir / 'xcp_abcd' / 'logs'
# Check and create output and working directories
output_dir.mkdir(exist_ok=True, parents=True)
log_dir.mkdir(exist_ok=True, parents=True)
work_dir.mkdir(exist_ok=True, parents=True)
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {
'log_directory': str(log_dir),
'log_to_file': True
},
'execution': {
'crashdump_dir': str(log_dir),
'crashfile_format': 'txt',
'get_linked_libs': False,
},
'monitoring': {
'enabled': opts.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
if opts.resource_monitor:
ncfg.enable_resource_monitor()
# Build main workflow
build_log.log(25, INIT_MSG(
version=__version__,
fmriprep_dir=fmriprep_dir,
subject_list=subject_list,
uuid=run_uuid)
)
retval['workflow'] = init_xcpabcd_wf (
layout=layout,
omp_nthreads=omp_nthreads,
fmriprep_dir=str(fmriprep_dir),
lower_bpf=opts.lower_bpf,
upper_bpf=opts.upper_bpf,
contigvol=opts.contigvol,
bpf_order=opts.bpf_order,
motion_filter_order=opts.motion_filter_order,
motion_filter_type=opts.motion_filter_type,
band_stop_min=opts.band_stop_min,
band_stop_max=opts.band_stop_max,
subject_list=subject_list,
work_dir=str(work_dir),
task_id=opts.task_id,
despike=opts.despike,
smoothing=opts.smoothing,
params=opts.nuissance_regressors,
cifti=opts.cifti,
output_dir=str(output_dir),
head_radius=opts.head_radius,
brain_template=opts.brain_template,
custom_conf=opts.custom_conf,
dummytime=opts.dummytime,
fd_thresh=opts.fd_thresh,
name='xcpabcd_wf'
)
retval['return_code'] = 0
logs_path = Path(output_dir) / 'xcp_abcd' / 'logs'
boilerplate = retval['workflow'].visit_desc()
if boilerplate:
citation_files = {
ext: logs_path / ('CITATION.%s' % ext)
for ext in ('bib', 'tex', 'md', 'html')
}
# To please git-annex users and also to guarantee consistency
# among different renderings of the same file, first remove any
# existing one
for citation_file in citation_files.values():
try:
citation_file.unlink()
except FileNotFoundError:
pass
citation_files['md'].write_text(boilerplate)
build_log.log(25, 'Works derived from this xcp execution should '
'include the following boilerplate:\n\n%s', boilerplate)
return retval
|
0158606b916b35607480637275a2abda5eb53774
| 31,832 |
def handler404(request):
"""
Utility handler for people wanting EXCEPTIONAL_INVASION, since we can't trap the Http404 raised when the
URLconf fails to recognise a URL completely. Just drop this in as handler404 in your urls.py. (Note that
if you subclass ExceptionalMiddleware to override its render() method, you won't be able to use this.)
Don't reference this from here, it's imported into exceptional_middleware directly.
"""
em = ExceptionalMiddleware()
return em.process_exception(request, HttpNotFound())
|
65c952f8c6f25b2e625fb27ebdcd054ea3ea76c8
| 31,833 |
def get_pipeline(name, outfolder):
"""
Build and return pipeline instance associated with given name.
:param str name: Name of the pipeline to build.
:param str outfolder: Path to output folder for use by pipeline instance.
:return SafeTestPipeline: A test-session-safe instance of a Pipeline.
"""
if name == read_aligner.__name__:
return get_read_aligner(outfolder)
elif name == call_peaks.__name__:
return get_peak_caller(outfolder)
else:
raise ValueError("Unknown pipeline request: '{}'".format(name))
|
539d0485e8bdfa679bc7901f2e2174a2fab0d161
| 31,834 |
def get_idw_interpolant(distances, p=2):
"""IDW interpolant for 2d array of distances
https://pro.arcgis.com/en/pro-app/help/analysis/geostatistical-analyst/how-inverse-distance-weighted-interpolation-works.htm
Parameters
----------
distances : array-like
distances between interpolation point and grid point
p : float, optional
power of inverse distance weighting, default=2
Returns
-------
np.array
weights
"""
is_1d = distances.ndim == 1
if is_1d:
distances = np.atleast_2d(distances)
MIN_DISTANCE = 1e-8
weights = np.zeros(distances.shape)
match = distances[:, 0] < MIN_DISTANCE
weights[match, 0] = 1
weights[~match, :] = 1 / distances[~match, :] ** p
denom = weights[~match, :].sum(axis=1).reshape(-1, 1) # *np.ones((1,n_nearest))
weights[~match, :] = weights[~match, :] / denom
if is_1d:
weights = weights[0]
return weights
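# Hedged usage sketch (numpy assumed available as np, as the function above expects):
# with p=2 a point twice as far gets a quarter of the raw weight, and the
# normalised weights of each row sum to 1.
import numpy as np
w = get_idw_interpolant(np.array([1.0, 2.0]))
assert np.isclose(w.sum(), 1.0)
assert np.isclose(w[0] / w[1], 4.0)  # (1/1**2) / (1/2**2)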
|
fefc53b590ee083c176bb339ad309b285dfe1a1a
| 31,835 |
from typing import Iterable
def _render_html_attributes(attributes):
"""
Safely renders attribute-value pairs of an HTML element.
"""
if attributes and isinstance(attributes, dict):
def map_key(key):
# Map and case converts keys
return _kebab(ALIASES.get(key, key))
def map_value(key, value):
if key == CLASS:
# Reduce class values
if not isinstance(value, str) and isinstance(value, Iterable):
return _render_html_class(*value)
elif key == STYLE:
# Reduce style values
if isinstance(value, dict):
return _render_html_style(value)
# Reduce boolean values
if isinstance(value, bool):
if value:
return None
# Nothing to do
return value
def reduce_attributes():
return [render_attribute(map_key(k), v) for k, v in attributes.items() if not should_skip(v)]
def render_attribute(key, value):
return _render_html_attribute(key, map_value(key, value))
def should_skip(value):
return isinstance(value, bool) and not value
return SPACE + SPACE.join(reduce_attributes())
return EMPTY
|
c86d03c4fb316b1a3d74f377867d8a8609d55ff8
| 31,836 |
def interpolate_force_line2(form, x, tol=1E-6):
"""Interpolates a new point in a form polyline
Used by the `add_force_line` function
(I think it is assumed that the )
"""
if len(form) < 1:
raise ValueError('interpolate_force_line2 : form must not be an empty list')
form_out1 = [form[0]]
form_out2 = []
for pt1, pt2 in zip(form[:-1], form[1:]):
if (x - pt1[0] > 0.5 * tol and
pt2[0] - x > 0.5 * tol):
y = pt1[1] + (x - pt1[0]) * (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])
form_out1.extend([[x, y]])
form_out2.extend([[x, y]])
if x - pt2[0] >= 0.5 * tol:
form_out1.append(pt2)
else:
form_out2.append(pt2)
# problems arise if form_out2 is an empty list
return form_out1, form_out2
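# Hedged usage sketch: splitting a two-segment polyline at x=1.0 interpolates a
# point on the first segment and places it at the seam of both halves.
left, right = interpolate_force_line2([[0.0, 0.0], [2.0, 2.0], [4.0, 0.0]], 1.0)
assert left == [[0.0, 0.0], [1.0, 1.0]]
assert right == [[1.0, 1.0], [2.0, 2.0], [4.0, 0.0]]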
|
82a0eac9132b7e631fd395bddf87595385cae574
| 31,837 |
def load_hdf5(infile):
"""
Load a numpy array stored in HDF5 format into a numpy array.
"""
with h5py.File(infile, "r", libver='latest') as hf:
return hf["image"][:]
|
3a9a83f03a32a029a0dda5e86ac9432c96ff62c6
| 31,838 |
import json
def enqueue_crawling_job(delegate_or_broadcast_svc, job_id, urls, depth):
"""
Used to enqueue a crawling job (or delegate a sub-url on a current job)
to the worker pool.
:type delegate_or_broadcast_svc: ZeroMQDelegatorService or
ZeroMQBroadcastService.
:param delegate_or_broadcast_svc: The web API service uses a
ZeroMQBroadcastService to announce new crawling jobs. The crawler
service uses ZeroMQDelegatorService to delegate any sub-links found
while scouring a page.
:param int job_id: The job ID that these URLs fall under.
:param set urls: The URLs to crawl. We'll send out one announcement
per URL.
:param int depth: The depth that this crawl will be at. 0 being initial.
:rtype: int
:returns: The number of crawler announcements made. One per URL.
"""
message_dict = {
'job_id': job_id,
'depth': depth
}
for url in urls:
message_dict['url'] = url
message_str = json.dumps(message_dict)
delegate_or_broadcast_svc.send_message(message_str)
return len(urls)
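# Hedged usage sketch: any object exposing send_message() works as the service, so
# a types.SimpleNamespace recording into a list stands in for a real ZeroMQ service.
from types import SimpleNamespace
sent = []
svc = SimpleNamespace(send_message=sent.append)
assert enqueue_crawling_job(svc, job_id=1, urls={"http://a", "http://b"}, depth=0) == 2
assert len(sent) == 2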
|
6a211346edd6f921bf26ed08adcee98cff066764
| 31,839 |
def show_control_sidebar_tabs():
"""Show the control sidebar tabs"""
# Default the show recent activity tab to true
default_dict = {
'SHOW_RECENT_ACTIVITY_TAB': True,
}
# Pull the settings for the control sidebar from settings.
# It is possible that the show recent activity will be
# overridden by the settings
control_sidebar_tabs = {
**default_dict,
**getattr(
settings,
'ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS',
{}
),
}
# Get the number of tabs that should be shown. This sums up the values
# of the dictionary representing the control sidebar. Each value is a
# boolean. Even though the values are booleans, the sum operation
# will work since False == 0 and True == 1. Therefore, one setting set to
# true will yield 1, and 3 settings set to true will yield 3.
number_of_tabs = sum(control_sidebar_tabs.values())
# If the number of tabs is greater than 1, we will turn the control sidebar
# into tabs. If it is 0 or 1 tab, it will not render the tabs part.
show_tabs = number_of_tabs > 1
return {
'show_csb_tabs': show_tabs,
'show_csb_recent_tab': control_sidebar_tabs.get(
'SHOW_RECENT_ACTIVITY_TAB', False
),
'show_csb_settings_tab': control_sidebar_tabs.get(
'SHOW_SETTINGS_TAB', False
),
'show_csb_extra_tab': control_sidebar_tabs.get(
'SHOW_EXTRA_TABS', False
),
}
|
f30147c39c507effa92e2cd6c5bc8f9dd84438b6
| 31,840 |
def unsubscribe(login, rec):
"""Unsubscribe from user
"""
try:
if login == env.user.login:
raise SubscribeError
if rec:
if users.check_subscribe_to_user_rec(login):
fn = users.unsubscribe_rec
else:
return xmpp_template('sub_rec_not', login=login)
else:
if users.check_subscribe_to_user(login):
fn = users.unsubscribe
else:
return xmpp_template('sub_not', login=login)
if not fn(login):
if rec:
return xmpp_template('sub_unsub_rec_ok', login=login)
else:
return xmpp_template('sub_unsub_ok', login=login)
except UserNotFound:
return xmpp_template('user_not_found', login=login)
except SubscribeError:
if rec:
return xmpp_template('sub_unsub_rec_error', login=login)
else:
return xmpp_template('sub_unsub_error', login=login)
|
c0d79eb011734de3a06c5d50e187e4ecaa7a6319
| 31,841 |
def build_script(configurators):
"""
Loops over configurators, calling gather_input on them and then renders the script sections
in a template.
"""
def gather_input(configurator):
cfg = configurator.Config()
dialog.set_background_title(cfg.description)
return {"config": cfg, "input": cfg.gather_input()}
def generate_script(input_data):
cfg = input_data["config"]
return {"name": cfg.description, "content": cfg.generate_script(input_data["input"])}
sections = map(generate_script, map(gather_input, configurators))
return render_template(__file__, "install", {"sections": sections, "shared_globals": shared_globals})
|
4a332c3c0ebb55074babb1872a586dd43276f63a
| 31,842 |
def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
"""Makes sure that all sources have the specified eol sequence (default: unix)."""
if not os.path.isfile( path ):
raise ValueError( 'Path "%s" is not a file' % path )
try:
f = open(path, 'rb')
except IOError, msg:
print >> sys.stderr, "%s: I/O Error: %s" % (file, str(msg))
return False
try:
raw_lines = f.readlines()
finally:
f.close()
fixed_lines = [line.rstrip('\r\n') + eol for line in raw_lines]
if raw_lines != fixed_lines:
print '%s =>' % path,
if not is_dry_run:
f = open(path, "wb")
try:
f.writelines(fixed_lines)
finally:
f.close()
if verbose:
print is_dry_run and ' NEED FIX' or ' FIXED'
return True
|
e435e9856d14fdcaf1d833be4e8df7414d4c92eb
| 31,843 |
import logging
def truew(crse=None,
cspd=None,
hd=None,
wdir=None,
wspd=None,
zlr=DEFAULT_ZRL,
wmis=DEFAULT_MISSING_VALUES,
):
"""
FUNCTION truew() - calculates true winds from vessel speed, course and
relative wind
INPUTS
crse real Course TOWARD WHICH the vessel is moving over
the ground. Referenced to true north and the
fixed earth.
cspd real Speed of vessel over the ground. Referenced
to the fixed earth.
hd real Heading toward which bow of vessel is pointing.
Referenced to true north.
zlr real Zero line reference -- angle between bow and
zero line on anemometer. Direction is clockwise
from the bow. (Use bow=0 degrees as default
when reference not known.)
wdir real Wind direction measured by anemometer,
referenced to the ship.
wspd real Wind speed measured by anemometer,referenced to
the vessel's frame of reference.
wmis real Five element array containing missing values for
crse, cspd, wdir, wspd, and hd. In the output,
the missing value for tdir is identical to the
missing value specified in wmis for wdir.
Similarly, tspd uses the missing value assigned
to wmis for wspd.
*** WDIR MUST BE METEOROLOGICAL (DIRECTION FROM)! CRSE AND CSPD MUST
BE RELATIVE TO A FIXED EARTH! ***
OUTPUT VALUES:
tdir real True wind direction - referenced to true north
and the fixed earth with a direction from which
the wind is blowing (meteorological).
tspd real True wind speed - referenced to the fixed earth.
adir real Apparent wind direction (direction measured by
wind vane, relative to true north). IS
REFERENCED TO TRUE NORTH & IS DIRECTION FROM
WHICH THE WIND IS BLOWING. Apparent wind
direction is the sum of the ship relative wind
direction (measured by wind vane relative to the
bow), the ship's heading, and the zero-line
reference angle. NOTE: The apparent wind speed
has a magnitude equal to the wind speed measured
by the anemometer.
"""
# INITIALIZE VARIABLES
adir = 0
dtor = pi / 180
# Check course, ship speed, heading, wind direction, and
# wind speed for valid values (i.e. neither missing nor
# outside physically acceptable ranges).
err_mesg = []
if crse is None or crse < 0 or crse > 360 or crse == wmis[0]:
err_mesg.append('Bad or missing course: %g' % crse)
if cspd is None or cspd < 0 or cspd == wmis[1]:
err_mesg.append('Bad or missing cspd: %g' % cspd)
if wdir is None or wdir < 0 or wdir > 360 or wdir == wmis[2]:
err_mesg.append('Bad or missing wind dir: %g' % wdir)
if wspd is None or wspd < 0 or wspd == wmis[3]:
err_mesg.append('Bad or missing wind speed: %g' % wspd)
if hd is None or hd < 0 or hd > 360 or hd == wmis[4]:
err_mesg.append('Bad or missing heading: %g' % hd)
if zlr < 0.0 or zlr > 360.0:
err_mesg.append('Bad or missing zero line reference: %g' % zlr)
zlr = 0.0
if err_mesg:
logging.warning('TrueWinds: %s', '; '.join(err_mesg))
return (None, None, None)
# Convert from navigational coordinates to
# angles commonly used in mathematics
mcrse = 90 - crse
# Keep the value between 0 and 360 degrees
if (mcrse <= 0.0):
mcrse = mcrse + 360.0
# Calculate apparent wind direction
adir = hd + wdir + zlr
# Keep adir between 0 and 360 degrees
while adir >= 360.0:
adir = adir - 360.0
# Convert from meteorological coordinates to angles
# commonly used in mathematics
mwdir = 270.0 - adir
# Keep mdir between 0 and 360 degrees
if (mwdir <= 0.0):
mwdir = mwdir + 360.0
if (mwdir > 360.0):
mwdir = mwdir - 360.0
# Determine the east-west vector component and the
# north-south vector component of the true wind
x = wspd * cos(mwdir * dtor) + cspd * cos(mcrse * dtor)
y = wspd * sin(mwdir * dtor) + cspd * sin(mcrse * dtor)
# Use the two vector components to calculate the true wind
# speed
tspd = sqrt(x * x + y * y)
calm_flag = 1
# Determine the angle for the true wind
if (abs(x) > 1e-05):
mtdir = (atan2(y, x)) / dtor
else:
if (abs(y) > 1e-05):
mtdir = 180.0 - (90.0 * y) / abs(y)
else:
# The true wind speed is essentially zero: winds
# are calm and direction is not well defined
mtdir = 270.0
calm_flag = 0
# Convert from the common mathematical angle coordinate to
# the meteorological wind direction
tdir = 270.0 - mtdir
# Make sure that the true wind angle is between
# 0 and 360 degrees
while tdir < 0.0:
tdir = (tdir + 360.0) * calm_flag
while tdir > 360.0:
tdir = (tdir - 360.0) * calm_flag
# Ensure wmo convention for tdir = 360 for win
# from north and tspd > 0
if (calm_flag == 1 and (tdir < 0.0001)):
tdir = 360.0
return (tdir, tspd, adir)
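# Hedged usage sketch (assumes the enclosing module defines DEFAULT_ZRL,
# DEFAULT_MISSING_VALUES and `from math import pi, sin, cos, sqrt, atan2`):
# a stationary ship (cspd=0) heading north that measures a 10 kt relative wind
# from 090 should report that same wind as the true wind.
tdir, tspd, adir = truew(crse=0, cspd=0, hd=0, wdir=90, wspd=10, zlr=0, wmis=[-99] * 5)
assert round(tdir) == 90 and round(tspd) == 10 and adir == 90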
|
b3a97b126ba9bc32bd1452c3278ec72512dc6dd8
| 31,844 |
def ascii_encode(string):
"""Return the 8-bit ascii representation of `string` as a string.
:string: String.
:returns: String.
"""
def pad(num):
binary = baseN(num, 2)
padding = 8 - len(binary)
return "0"*padding + binary
return "".join(pad(ord(c)) for c in string)
|
1a06c49514e5c320d299a93aa683f249470b26f7
| 31,845 |
import chunk
def print_statement(processor, composer, searcher):
# type: (Parser, Compiler, scanner.Scanner) -> Tuple[Parser, Compiler]
"""Evaluates expression and prints result."""
processor, composer = expression(processor, composer, searcher)
processor = consume(
processor,
searcher,
scanner.TokenType.TOKEN_SEMICOLON,
"Expect ';' after expression.",
)
return emit_byte(processor, composer, chunk.OpCode.OP_PRINT)
|
ecb930fb90c7cb88e874d85df42ba3d28225031d
| 31,846 |
def gmm_sky(values, **extras):
"""Use a gaussian mixture model, via expectation maximization.
of course, there's only one gaussian. could add another for
faint sources, bad pixels, but..."""
gmm = sklearn.mixture.GMM()
r = gmm.fit(values)
return r.means_[0, 0], np.sqrt(r.covars_[0, 0])
|
a41a386b931c2eaa71dd5927f5060548dac5dc33
| 31,847 |
import pytz
def getLocalTime(utc_dt, tz):
"""Return local timezone time
"""
local_tz = pytz.timezone(tz)
local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
return local_dt
|
70789f61a90d991714fafe3c15917d1f1113fe8f
| 31,848 |
def SyncSleep(delay, name=None):
"""Pause for `delay` seconds (which need not be an integer).
This is a synchronous (blocking) version of a sleep op. Its purpose is
to be contrasted with Examples > AsyncSleep.
Args:
delay: tf.Tensor which is a scalar of type float.
name: An optional name for the op.
Returns:
The `delay` value.
"""
return examples_sync_sleep(delay=delay, name=name)
|
7015a756cc670efcafce9952269e3a7f2465e3fb
| 31,849 |
def admin():
"""
Admin page; redirects to the rooms view.
"""
return redirect(url_for('rooms'))
|
090d00f84204f58b22e83c468d368d928ea92e03
| 31,850 |
def timer(interval):
"""
Decorator for registering a function as a callback for a timer thread. Timer object deals with delegating event
to callback function.
"""
def decorator(f):
timers = timer.__dict__.setdefault('*timers', [])
timers.append(Timer(f, interval))
return f
return decorator
|
773fd5d072d7cc83eaf7512a6b6722b9df9ca1b1
| 31,851 |
def common_start(stra, strb):
""" returns the longest common substring from the beginning of stra and strb """
def _iter():
for a, b in zip(stra, strb):
if a == b:
yield a
else:
return ""
return ''.join(_iter())
|
365daf9e43a01e8a1871fbe88775880adba940a9
| 31,852 |
def auto_weight(X, Y, model, resolution=0.01, limits=0.05, **kwargs):
"""Automatically determine the weight to use in the CPD algorithm.
Parameters
----------
X : ndarray (N, D)
Fixed point cloud, an N by D array of N original observations in an n-dimensional space
Y : ndarray (M, D)
Moving point cloud, an M by D array of N original observations in an n-dimensional space
model : str or BaseCPD child class
The transformation model to use, available types are:
Translation
Rigid
Euclidean
Similarity
Affine
resolution : float
the resolution at which to sample the weights
limits : float or length 2 iterable
The limits of weight to search
kwargs : dictionary
key word arguments to pass to the model function when its called.
"""
# test inputs
model = choose_model(model)
try:
# the user has passed low and high limits
limit_low, limit_high = limits
except TypeError:
# the user has passed a single limit
limit_low = limits
limit_high = 1 - limits
# generate weights to test
ws = np.arange(limit_low, limit_high, resolution)
# container for various registrations
regs = []
# iterate through weights
for w in ws:
kwargs.update(weight=w)
reg = model(X, Y)
reg(**kwargs)
regs.append(reg)
# if the dimension of the data is less than 3 use the 1 norm
# else use the frobenius norm. This is a heuristic based on simulated data.
if reg.D < 3:
norm_type = 1
else:
norm_type = "fro"
# look at all the norms of the match matrices (The match matrix should be sparse
# and the norms we've chosen maximize sparsity)
norm = np.asarray([np.linalg.norm(reg.p_old, norm_type) for reg in regs])
# find the weight that maximizes sparsity
w = ws[norm.argmax()]
# update and run the model
kwargs.update(weight=w)
reg = model(X, Y)
reg(**kwargs)
# return the model to the user
return reg
|
3eb18c38725f434d2a2756acfe4ade01e5964b93
| 31,853 |
def max(self, parameter_max):
"""
It returns the maximum value of a parameter and the value's indexes.
Parameters
----------
parameter_max: str
Name of the parameter.
Returns
-------
max_dict: dict
Dictionary with the following format:
{
'<name of index 1>': <value of index 1>,
'<name of index n>': <value of index n>,
'name of parameter': < maximum value of parameter>
}
If max_dict is None, all the values of the parameter are NaN.
"""
df = self.data[parameter_max]
df = df.reset_index()
try:
max_dict = df.loc[
df[parameter_max] == df[parameter_max].max(skipna=True)].to_dict('records')[0]
except IndexError:
max_dict = None
return max_dict
|
8acf9c645a7c427d543c9087d66318ddf4765330
| 31,854 |
import json
def search(query):
"""
Searches your bucket. query can contain plaintext, and can also contain clauses
like $key:"$value" that search for exact matches on specific keys.
    Returns either the raw Elasticsearch response (in case of an error) or a list of objects with the following keys:
key: key of the object
version_id: version_id of object version
operation: Create or Delete
meta: metadata attached to object
size: size of object in bytes
text: indexed text of object
        source: source document for object (what is actually stored in Elasticsearch)
time: timestamp for operation
"""
es = _create_es()
payload = {'query': {'query_string': {
'default_field': 'content',
'query': query,
'quote_analyzer': 'keyword',
}}}
r = es.search(index=es_index, body=payload)
try:
results = []
for result in r['hits']['hits']:
key = result['_source']['key']
vid = result['_source']['version_id']
op = result['_source']['type']
meta = json.dumps(result['_source']['user_meta'])
size = str(result['_source']['size'])
text = result['_source']['text']
time = str(result['_source']['updated'])
results.append({
'key': key,
'version_id': vid,
'operation': op,
'meta': meta,
'size': size,
'text': text,
'source': result['_source'],
'time': time
})
results = list(sorted(results, key=lambda x: x['time'], reverse=True))
return results
    except KeyError:
return r
|
7844e503eb65a97d5729498fad4119d7b78606cd
| 31,855 |
import re
def camel_to_snake(text: str) -> str:
"""
A helper function to convert `camelCase` to `snake_case`.
- e.g. `bestBigBrawlerTime` -> `best_big_brawler_time`
### Parameters
text: `str`
The text to restructure from `camelCase` to `snake_case`.
### Returns
`str`
The restructured `snake_case` text.
"""
return re.compile(r"(?<!^)(?=[A-Z])").sub("_", text).lower()
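# Usage sketch (illustrative):
print(camel_to_snake("bestBigBrawlerTime"))  # -> "best_big_brawler_time"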
|
b9ac748bf0cc345c7cfb0bade1e4b1e9cbdf712c
| 31,857 |
from statistics import mean, median
def get_stats(distance_results):
"""
    Compute the median, mean, min, and max of a list of distances.
    :param distance_results: list of numeric distances
    :return: dictionary of stats with keys 'median', 'mean', 'min' and 'max'
"""
if len(distance_results) == 0:
return {'median': 0, 'mean': 0, 'min': 0, 'max': 0}
else:
return {'median': median(distance_results), 'mean': mean(distance_results), 'min': min(distance_results),
'max': max(distance_results)}
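# Usage sketch (illustrative):
print(get_stats([1, 2, 3, 10]))  # -> {'median': 2.5, 'mean': 4, 'min': 1, 'max': 10}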
|
429a6b984f080fa351888177dd3d3f26e30d4a6e
| 31,858 |
from typing import Mapping
def create(collection_id):
"""Create a mapping.
---
post:
summary: Create a mapping
parameters:
- description: The collection id.
in: path
name: collection_id
required: true
schema:
minimum: 1
type: integer
example: 2
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/MappingCreate'
responses:
'200':
content:
application/json:
schema:
$ref: '#/components/schemas/Mapping'
description: OK
tags:
- Collection
- Mapping
"""
collection = get_db_collection(collection_id, request.authz.WRITE)
data = parse_request("MappingCreate")
mapping = Mapping.create(
load_query(),
get_table_id(data),
collection,
request.authz.id,
entityset_id=get_entityset_id(data),
)
db.session.commit()
return MappingSerializer.jsonify(mapping)
|
b0283e174ccc0ba2f50cad2d7f416d0f6b570025
| 31,861 |
def get_arc_proxy_lifetime(proxy_file=None):
"""
Returns the remaining lifetime of the arc proxy in seconds. When *proxy_file* is *None*, it
defaults to the result of :py:func:`get_arc_proxy_file`. Otherwise, when it evaluates to
*False*, ``arcproxy`` is queried without a custom proxy file.
"""
out = _arc_proxy_info(args=["--infoitem=validityLeft"], proxy_file=proxy_file)[1].strip()
try:
return int(out)
    except (TypeError, ValueError):
raise Exception("no valid lifetime found in arc proxy: {}".format(out))
|
08c46e3f25db792ae4553a4670487f04d27e9898
| 31,863 |
def LLR_alt2(pdf, s0, s1):
"""
This function computes the approximate generalized log likelihood ratio (divided by N)
for s=s1 versus s=s0 where pdf is an empirical distribution and
s is the expectation value of the true distribution.
pdf is a list of pairs (value,probability). See
http://hardy.uhasselt.be/Fishtest/GSPRT_approximation.pdf
"""
s, var = stats(pdf)
return (s1 - s0) * (2 * s - s0 - s1) / var / 2.0
|
cb56260a390d1e9233e96e430835f851c222e30a
| 31,864 |
def load_configs(ex_dir):
""" Loads all configuration files into a list from the given experiment directory. """
configs = []
run_nums = get_run_nums(ex_dir)
for run_num in run_nums:
loc = ex_dir + '/' + run_num
try:
configs.append(extract_config(loc))
        except Exception:
            raise Exception("Cannot load config in {}. Please remove this from the directory to proceed".format(loc))
return configs, run_nums
|
7413f3a310684c47b87bfc912fae39ab0e6067eb
| 31,865 |
from typing import Type
def create_app(item_model: Type[Item], db_session: Session) -> StacApi:
"""Create application with a custom sqlalchemy item"""
api = StacApi(
settings=ApiSettings(indexed_fields={"datetime", "foo"}),
extensions=[
TransactionExtension(
client=TransactionsClient(item_table=item_model, session=db_session)
)
],
client=CoreCrudClient(item_table=item_model, session=db_session),
)
return api
|
80407ff030569bc4df23296e1e156b3d116f0cdc
| 31,866 |
from typing import Dict
def reducemin(node: NodeWrapper,
params: Dict[str, np.ndarray],
xmap: Dict[str, XLayer]):
""" ONNX ReduceMin to XLayer AnyOp conversion function """
return generic_reduce("ReduceMin", node, params, xmap)
|
f8f6c84d9aa0a1f5f3f53a21e65938f939806f57
| 31,867 |
import numpy as np
def mass(query, ts):
    """Calculates Mueen's ultra-fast Algorithm for Similarity Search (MASS) between a query and timeseries. MASS is a Euclidean distance similarity search algorithm."""
#query_normalized = zNormalize(np.copy(query))
m = len(query)
q_mean = np.mean(query)
q_std = np.std(query)
mean, std = movmeanstd(ts,m)
dot = slidingDotProduct(query,ts)
return np.sqrt(2*m*(1-(dot-m*mean*q_mean)/(m*std*q_std)))
|
89dc6b5da2e2197f3e96606d17912868a112b5f7
| 31,868 |
import pandas as pd
def concept_categories():
    """Concept category mapping {code: name}."""
db = get_db()
collection = db['同花顺概念']
pipeline = [
{
'$project': {
'_id': 0,
'概念编码': 1,
'概念名称': 1,
}
}
]
ds = collection.aggregate(pipeline)
df = pd.DataFrame.from_records(ds)
try:
df.columns = ['code', 'name']
except ValueError:
        raise NotImplementedError('The local "stock concept" data is empty; please run `stock thsgn`')
df.sort_values('code', inplace=True)
return df.set_index('code').to_dict()['name']
|
0fd38a54ccfec50fbfeab438496a80b63bdf8a7f
| 31,869 |
def schema_validate(stack, schema_json):
"""Check that the schema is valid.
.. warning: TidyWidget index is hardcoded!
Parameters
----------
stack: pd.DataFrame
The stacked data.
    schema_json: json
        JSON schema description used to build the validation schema.

    Returns
    -------
    tuple (bool, ValidationReport)
        A success flag and the report produced during validation.
"""
    # Reset the index so that no index value is repeated in the stacked data.
stack = stack.reset_index()
# Create schema
schema = schema_from_json(schema_json)
# Common columns
cols_schema = set([c.name for c in schema.columns])
cols_stack = set(stack.column.dropna().unique())
columns = cols_schema.intersection(cols_stack)
# Create report
report = ValidationReport()
report.add(1, "Processing schema validation...")
# Loop
for c in columns:
aux = stack[stack.column.isin([c])].rename(columns={'result': c})
# Validate schema
errors = schema.validate(aux, columns=[c])
# Complete report
if len(errors):
report.add(1, "\nTotal errors found in <{0}>: {1}".format(c, len(errors)))
# for e in errors:
# print(e)
report.add(2, stack.iloc[[error.row for error in errors]])
# Return
return True, report
|
5ad66d87c6ff09fecbc99338bd8e1fd1dc8a1ff9
| 31,870 |
def dict_to_config_str(config_dict):
"""Produces a version of a dict with keys (and some values) replaced with
shorter string version to avoid problems with over long file names in
tensorboard"""
key_abrv = {
"embedding_dimension": "ed",
"loss_type": "lt",
"initialize_uniform": "iu",
"k_negative_samples": "ns",
"distance_measure": "dm",
"margin": "mrgn",
"sample_negative_relations": "snr",
"bias": "b",
"feature_map_dimension": "fmd",
"fix_conv_layers": "fcl",
"fix_structure_embeddings": "fse",
"fix_word_embeddings": "fwd",
"pretrained_word_embeddings": "pwe",
"max_words_per_sentence": "mwps",
"vocab_dim": "vd",
"filter_sizes": "fs",
"dropout_keep_prob": "dkp",
"description_mode": "dm",
"l1_kernel_size": "l1ks",
"l2_kernel_size": "l2ks",
"entity_wd_type": "eWDt",
"rel_wd_type": "rWDt",
"type_wd": "tWD",
"type_rel_wd": "trWD",
"filt_top_t": "ftt",
"filt_btm_t": "fbt",
"emb_dropout": "edp"
}
val_abrv = {
None: "X",
False: "F",
True: "T",
"softplus": "sp"
}
entries = []
for name, value in config_dict.items():
key = key_abrv[name] if name in key_abrv else name
if type(value) == str or type(value) == bool:
value = val_abrv[value] if value in val_abrv else value
if type(value) == list:
value = "L" + "-".join([str(v) for v in value]) + "L"
        # Skip (i.e. drop) variable_device: no one cares about it and the escape
        # symbol messes with the generated file path
if key == "variable_device":
continue
entries.append((key, value))
return entries
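# Usage sketch (illustrative): abbreviate a config dict into short (key, value) pairs.
config = {"embedding_dimension": 100, "bias": True, "filter_sizes": [3, 4, 5]}
print(dict_to_config_str(config))  # -> [('ed', 100), ('b', 'T'), ('fs', 'L3-4-5L')]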
|
da7d4ae8a58c2dab2d07616ae25438c8c0e0252d
| 31,871 |
import time
import json
def wait_erased(fout,
prog,
erased_threshold=20.,
interval=3.0,
prog_time=None,
passn=0,
need_passes=0,
timeout=None,
test=False,
verbose=False):
"""
    Poll the device until it reads back as erased.

    erased_threshold: stop when this percent of elapsed iterations has been a
        contiguous successful erase.
        Ex: if 99 iterations wasn't fully erased but 100+ was, stop at 120 iterations
    interval: how often, in seconds, to read the device
    timeout: if set, raise an Exception after this many seconds without finishing
    test: if True, treat every read as erased and stop after the first iteration
"""
tstart = time.time()
# Last iteration timestamp. Used to "frame lock" reads at set interval
tlast = None
# Timestamp when EPROM was first half erased
dt_50 = None
dt_100 = None
iter = 0
nerased = 0
while True:
if tlast is not None:
while time.time() - tlast < interval:
time.sleep(0.1)
tlast = time.time()
dt_this = tlast - tstart
iter += 1
if timeout and dt_this >= timeout:
j = {
"type": "timeout",
'iter': iter,
'seconds': dt_this,
}
fout.write(json.dumps(j) + '\n')
fout.flush()
raise Exception("Timed out")
read_buf = prog.read()["code"]
erased, erase_percent = is_erased(read_buf, prog_dev=prog.device)
if erased or test:
nerased += 1
if not dt_100:
dt_100 = tlast - tstart
else:
nerased = 0
dt_100 = None
# Declare done when we've been erased for some percentage of elapsed time
complete_percent = 100.0 * nerased / iter
# Convert to more human friendly 100% scale
end_check = 100. * complete_percent / erased_threshold
j = {
"type": "read",
'iter': iter,
'seconds': dt_this,
'read': fw2str(read_buf),
'read_meta': "zlib",
'complete_percent': complete_percent,
'erase_percent': erase_percent,
'erased': erased
}
fout.write(json.dumps(j) + '\n')
fout.flush()
signature = hash8(read_buf)
print(
"pass %u / %u, iter % 3u @ %s: is_erased %u w/ erase_percent % 8.3f%%, sig %s, end_check: %0.1f%%"
% (
passn,
need_passes,
iter,
util.time_str_sec(dt_this),
erased,
erase_percent,
signature,
#
end_check))
if dt_50 is None and erase_percent >= 50 or test:
dt_50 = tlast - tstart
print("50%% erased after %0.1f sec" % (dt_50, ))
if end_check >= 100.0 or test:
break
dt_120 = tlast - tstart
print("Erased 100%% after %0.1f sec" % (dt_100, ))
print("Erased 120%% after %0.1f sec" % (dt_120, ))
j = {
"type": "footer",
"erase_time": dt_100,
"run_time": dt_120,
"half_erase_time": dt_50
}
if prog_time is not None:
j["prog_time"] = prog_time
fout.write(json.dumps(j) + '\n')
fout.flush()
return dt_100, dt_50
|
8ce4e6739d1d911353039c59ac8177f0733ea64d
| 31,872 |
import pandas as pd
def settle(people, rates, bills, decimal_places=2):
"""Calculate the smallest set of transactions needed to settle debts.
For each person we sum the money spent for her and subtract the money she
paid. We get the amount she should pay (it does not matter to whom from her
point of view). After she does, she should get 0 and pay 0, therefore she
is solved. The person who got her money now should pay more (or get less).
So in each step we choose one yet unsolved person, give the money she
should pay to someone else who has not been solved yet and update the money
he is due. In n-1 steps we settle all debts (n is the number of people).
Arguments
people: list of strings - names of all the people. (Needed for All shortcut
in csv)
rates: dict of str -> float - exchange rates of currencies to the
currency in which debts should be paid. Exchange rate for each currency
(other than final_currency) that appears in filename should be
specified.
bills: pd.DataFrame - bills. Should have columns Name (of the person who
paid), What (has been paid), Amount (of money paid), Currency (in which
it has been paid; for example "EUR"), For (whom it has been paid;
either ["All"] or list of names or ["AllBut"] + list of names, for
example ["AllBut", "Adam", "David"]).
decimal_places: int - number of decimal places to which the amounts to be
paid should be rounded. Default 2.
    Return a set of tuples (who: str, whom: str, how_much_should_pay: float)
"""
df = bills.copy()
# add column to df - amount of money in the final currency
try:
df["AmountInCUR"] = df.Currency.map(lambda x: rates[x]) * df.Amount
except KeyError as k:
raise ValueError("Exchange rate for %s not specified" % (k.args[0]))
# add lines to df for each person paying nothing for all
df_people = pd.DataFrame({
"Name": people,
"AmountInCUR": [0] * len(people),
"For": ["All"] * len(people)
})
    df = pd.concat([df, df_people])
# dict {"person": money spent for her, regardless who paid}
money_spent = {}
for person in people:
money_spent.setdefault(person, 0)
for amount, fw in zip(df.AmountInCUR, df.For):
if fw[0] == "All":
money_spent[person] += amount / len(people)
elif fw[0] == "AllBut" and person not in fw[1:]:
money_spent[person] += amount / (len(people) - (len(fw)-1))
elif fw[0] != "AllBut" and person in fw:
money_spent[person] += amount / len(fw)
# just creating pd.DataFrame from dict
tmp = pd.DataFrame.from_records(list(money_spent.items()))
tmp.columns = ["Name", "MoneySpent"]
# create pd.DataFrame with columns "Name", "MoneySpent", "MoneyPaid",
# "ShouldPay"; one row for each person
sums = df.filter(["Name", "AmountInCUR"]).groupby("Name").sum()
sums.columns = ["MoneyPaid"]
sums = pd.merge(tmp, sums, left_on="Name", right_index=True)
sums["ShouldPay"] = sums.MoneySpent - sums.MoneyPaid
    howto = sums.sort_values("ShouldPay")
# set of tuples (who, whom, how_much_should_pay)
payments = set()
# topay - list of lists [how_much_should_pay, "who"]
topay = [[p, w] for p, w in zip(howto.ShouldPay, howto.Name) if p != 0]
# Keep people sorted by the amount of money they should pay.
# At each step take all the money the last one in the list should pay (it
# is the one who should pay most) and give it to the first one -> the last
# one is solved and removed from the list. Because the overall amount of
# money that should be paid is equal to the overall amount of money that
# should be received, in at most n-1 steps all the debts are settled (where
# n is the number of people involved).
while len(topay) > 1:
pay, who = topay.pop()
topay[0][0] += pay
if pay > 0:
payments.add((who, topay[0][1], round(pay, decimal_places)))
topay.sort()
return set(payments)
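# Usage sketch (illustrative): a single 30 EUR bill paid by Alice and shared by all.
people = ["Alice", "Bob"]
rates = {"EUR": 1.0}
bills = pd.DataFrame({"Name": ["Alice"], "What": ["dinner"], "Amount": [30.0],
                      "Currency": ["EUR"], "For": [["All"]]})
print(settle(people, rates, bills))  # -> {('Bob', 'Alice', 15.0)}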
|
25890a647a492999fe44e668d2270f359fbc3863
| 31,874 |
import ast
def extract_ast_class_def_by_name(ast_tree, class_name):
"""
Extracts class definition by name
:param ast_tree: AST tree
:param class_name: name of the class.
:return: class node found
"""
class ClassVisitor(ast.NodeVisitor):
"""
Visitor.
"""
def __init__(self):
self.found_class_node = None
def visit_ClassDef(self, node): # pylint: disable=invalid-name
"""
Visit class definition.
:param node: node.
:return:
"""
if node.name == class_name:
self.found_class_node = node
visitor = ClassVisitor()
visitor.visit(ast_tree)
return visitor.found_class_node
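# Usage sketch (illustrative): locate a class definition by name in parsed source.
tree = ast.parse("class Foo:\n    pass\n\nclass Bar:\n    pass\n")
print(extract_ast_class_def_by_name(tree, "Bar").name)  # -> Bar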
|
011f1cb8d965db8e30e6f4281704a6140103946b
| 31,875 |
def getAttribute(v, key):
"""
    This function mainly retrieves @@transducer/xxx and fantasy-land/xxx properties.
    We assume a Python dict or object may own such properties.
dict case:
d = {'@@transducer/init': lambda: True}
init_fn = getAttribute(d, '@@transducer/init')
obj case:
class T:
def init(self):
return True
def get(self, type):
if type == '@@transducer/init':
return self.init
t = T()
init_fn = getAttribute(t, '@@transducer/init')
method case 1:
class Mapper:
def map(fn):
return fn
m = Mapper()
map_fn = getAttribute(m, 'map')
method case 2:
class Mapper:
def map(self, fn):
return fn
m = Mapper()
map_fn = getAttribute(m, 'map')
    return: the function found for the key, otherwise None
"""
if isinstance(v, dict) and key in v:
return v[key]
if _has(v, key):
return getattr(v, key, None)
if _has(v, 'get'):
try:
# Case that get is (key, default) -> value signature
return v.get(key, default=None)
except TypeError:
try:
# Case that get is a instance method with (self, key, default) -> value signature
return v.get(v, key, default=None)
except TypeError:
# Unknown signature
return None
|
d44d31bb6734637c56554e040d93620bb2d617e0
| 31,876 |
import numpy as np
def getdata(mca):
""" Extract the data contained in spectrum files
INPUT:
mca; path
OUTPUT:
Data; 1D-array """
name = str(mca)
name = name.split("\\")[-1]
name = name.replace('_',' ')
# custom MC generated files
if 'test' in name or 'obj' in name or 'newtest' in name:
Data = []
datafile = open(mca)
lines = datafile.readlines()
for line in lines:
line = line.split()
try: counts = float(line[1])
except: counts = float(line[0])
counts = counts * 10e3
Data.append(counts)
Data = np.asarray(Data)
# this works for mca extension files
else:
ObjectData=[]
datafile = open(mca)
line = datafile.readline()
line = line.replace("\r","")
line = line.replace("\n","")
# AMPTEK files start with this tag
if "<<PMCA SPECTRUM>>" in line:
while "<<DATA>>" not in line:
line = datafile.readline()
if line == "": break
line = datafile.readline()
while "<<END>>" not in line:
try: ObjectData.append(int(line))
except ValueError as exception:
datafile.close()
                    raise exception
line = datafile.readline()
if line == "": break
# Works if file is just counts per line
elif line.isdigit():
while "<<END>>" not in line:
ObjectData.append(int(line))
line = datafile.readline()
if line == "": break
# if file has two columns separated by space or tab
elif "\t" in line or " " in line:
while "<<END>>" not in line:
counts = line.split("\t")[-1]
if counts.isdigit(): ObjectData.append(int(counts))
else:
counts = line.split(" ")[-1]
ObjectData.append(int(counts))
line = datafile.readline()
if line == "": break
Data = np.asarray(ObjectData)
datafile.close()
return Data
|
4b89f6a6d93cf174c6791fb908482958d7651220
| 31,877 |
def merge_dicts(dict_a, dict_b):
"""Recursively merge dictionary b into dictionary a.
If override_nones is True, then
"""
def _merge_dicts_(a, b):
for key in set(a.keys()).union(b.keys()):
if key in a and key in b:
if isinstance(a[key], dict) and isinstance(b[key], dict):
yield (key, dict(_merge_dicts_(a[key], b[key])))
elif b[key] is not None:
yield (key, b[key])
else:
yield (key, a[key])
elif key in a:
yield (key, a[key])
elif b[key] is not None:
yield (key, b[key])
return dict(_merge_dicts_(dict_a, dict_b))
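# Usage sketch (illustrative): nested merge where None values in b do not override a.
a = {"a": 1, "b": {"c": 2}}
b = {"a": None, "b": {"d": 3}}
print(merge_dicts(a, b))  # -> {'a': 1, 'b': {'c': 2, 'd': 3}} (key order may vary)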
|
1ce3d253ceb467ead2bee8efb2652b81191147d0
| 31,878 |
import torch
def adjust_contrast(inp, contrast_factor):
"""
Adjust Contrast of an image.
This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision.
The inp image is expected to be in the range of [0, 1].
"""
if not isinstance(inp, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(inp)}")
if not isinstance(contrast_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(contrast_factor)}")
if isinstance(contrast_factor, float):
contrast_factor = torch.tensor([contrast_factor])
contrast_factor = contrast_factor.to(inp.device).to(inp.dtype)
if (contrast_factor < 0).any():
raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}")
for _ in inp.shape[1:]:
contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)
# Apply contrast factor to each channel
x_adjust = inp * contrast_factor
# Truncate between pixel values
out = torch.clamp(x_adjust, 0.0, 1.0)
return out
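# Usage sketch (illustrative): a mid-gray batch scaled by a contrast factor of 1.5.
img = torch.full((1, 3, 2, 2), 0.5)
print(adjust_contrast(img, 1.5)[0, 0])  # -> 2x2 tensor of 0.7500 (0.5 * 1.5, clamped to [0, 1])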
|
9e98f1b0cf11efd6c9aa14b4ff35701605690134
| 31,879 |
def default_tiling():
"""Return default tiling options for GeoTIFF driver.
Returns
-------
dict
GeoTIFF driver tiling options.
"""
return {"tiled": True, "blockxsize": 256, "blockysize": 256}
|
c2d78f2d87478121cc52124d0b33edde5850a10a
| 31,880 |
import importlib
def read_template():
"""Loads a html template for jinja.
Returns:
str: The loaded template.
"""
html = 'image_template.html'
package = '.'.join([__name__, 'templates'])
return importlib.resources.read_text(package, html)
|
f6288076ed32c2ed6db86e91595c4a635c9ce5b0
| 31,881 |
def parse_training_labels(train_box_df, train_image_dirpath):
"""
    Parses the dataframe of training boxes into a dict keyed by patient id.
Example:
{
'patientId-00': {
'dicom': path/to/dicom/file,
            'label': either 0 or 1 for normal or pneumonia,
'boxes': list of box(es)
},
'patientId-01': {
'dicom': path/to/dicom/file,
            'label': either 0 or 1 for normal or pneumonia,
'boxes': list of box(es)
}, ...
}
"""
    # Helper to extract coords as a list [x, y, width, height]
def extract_box(row):
return [row['x'], row['y'], row['width'], row['height']]
parsed = {}
for n, row in train_box_df.iterrows():
# Initialize patient entry into parsed
pid = row['patientId']
if pid not in parsed:
parsed[pid] = {
'dicom': train_image_dirpath + '/%s.dcm' % pid,
'label': row['Target'],
'boxes': []}
# Add box if opacity is present
if parsed[pid]['label'] == 1:
parsed[pid]['boxes'].append(extract_box(row))
return parsed
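# Usage sketch (illustrative): column names follow the RSNA pneumonia detection CSV;
# the image directory path is a placeholder.
import pandas as pd
boxes_df = pd.DataFrame([{"patientId": "p0", "x": 10, "y": 20,
                          "width": 30, "height": 40, "Target": 1}])
print(parse_training_labels(boxes_df, "train_images"))
# -> {'p0': {'dicom': 'train_images/p0.dcm', 'label': 1, 'boxes': [[10, 20, 30, 40]]}}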
|
1b6765c6c3ffe261cd6d796fd1f97154008a5126
| 31,882 |
from typing import Optional
from typing import Union

from requests import Session
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def retryable_request(retries: Optional[Union[int, Retry]] = 1, *,
session: Session = None,
backoff_factor: float = None) -> Session:
"""Возвращает объект Session, который способен повторять запрос указанное число раз.
:param retries: Количество повторов.
:param session: Объект сессии.
:param backoff_factor: Увеличивающаяся задержка между повторами.
"""
assert retries is None or isinstance(retries, (int, Retry))
retries = 1 if retries is None else retries
backoff_factor = 0 if backoff_factor is None else backoff_factor
session = Session() if session is None else session
if isinstance(retries, int):
retry = Retry(total=retries, read=retries, connect=retries, backoff_factor=backoff_factor)
else:
retry = retries
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
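# Usage sketch (illustrative; the URL is a placeholder):
session = retryable_request(3, backoff_factor=0.5)
response = session.get("https://example.com")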
|
eb2396d9284eb9693643339477ab2dbb5aadcb8c
| 31,883 |
def get_timeouts(timeout=None):
"""Ensures that both read and connect timeout are passed in
final patched requests call. In case any of it is missing in initial
call by the caller, default values are added"""
if timeout:
return timeout
return (Config.connect_timeout(), Config.read_timeout())
|
f2545334efdf0e1c740cffa5554a8ee8c91eda0c
| 31,884 |
import math
def populateDayOverview(excelDataConsolidated:ExcelDataConsolidation, overviewSheetData) -> DayOverview:
"""Populates the Day Overview Object
Arguments:
excelDataConsolidated {ExcelDataConsolidation} -- [description]
overviewSheetData {[type]} -- [description]
Returns:
DayOverview -- [description]
"""
if (math.isnan(overviewSheetData["Prayer"][0])):
prayer = PRAYER_DEFAULT
else:
prayer = not PRAYER_DEFAULT
return DayOverview(excelDataConsolidated.getDateRecorded(), excelDataConsolidated.getStartTime(),
excelDataConsolidated.getLeavingTime(), excelDataConsolidated.getWorkHours(), excelDataConsolidated.getBreakHours(),
excelDataConsolidated.getLunchDuration(), getMorningScheduleId(overviewSheetData["Morning"][0]), prayer)
|
6969e9c047d1206df059c687f4d9b2e13ab6cc61
| 31,885 |
async def get_loading_images(mobi_app: str = "android", platform: str = "android",
height: int = 1920, width: int = 1080,
build: int = 999999999, birth: str = "",
credential: Credential = None):
"""
    Get the splash screen images shown on app launch.

    Args:
        build (int, optional) : Client internal build number
        mobi_app (str, optional) : android / iphone / ipad
        platform (str, optional) : android / ios / ios
        height (int, optional) : Screen height
        width (int, optional) : Screen width
        birth (str, optional) : Birthday (four digits, e.g. 0101)
        credential (Credential, optional): Credential

    Returns:
        The result of the API call
"""
credential = credential if credential is not None else Credential()
api = API["splash"]["list"]
params = {
"build": build,
"mobi_app": mobi_app,
"platform": platform,
"height": height,
"width": width,
"birth": birth
}
return await request('GET', api['url'], params, credential=credential)
|
032f7ab911dc48dfbec2b09b33863230c4295c67
| 31,886 |
def cross_energy(sig, eps, mesh):
"""
    Compute the cross energy of the stress field sig and the strain field eps.
"""
return fe.assemble(fe.inner(sig, eps) * fe.dx(mesh))
|
b6bcf3a9c2885e1d3bf6b8c0f3e30454854ed474
| 31,887 |
import requests
def login():
"""
    Login API
    :return: True: login succeeded; False: login failed
"""
ctrl.BEING_LOG = True
if check_login_status():
ctrl.BEING_LOG = False
return True
error_times = 0
while True:
try:
update_proxy()
update_cookies()
username = change_to_base64(account.username)
password = change_to_base64(account.password)
captcha = get_captcha()
            logger.info('Captcha recognition result: %s' % captcha)
form_data = url_login.get('form_data')
form_data.__setitem__('j_validation_code', captcha)
form_data.__setitem__('j_username', username)
form_data.__setitem__('j_password', password)
resp = requests.post(url=url_login.get('url'), headers=url_login.get('headers'), data=form_data,
cookies=ctrl.COOKIES, proxies=ctrl.PROXIES, timeout=TIMEOUT)
if resp.text.find(account.username + ',欢迎访问') != -1:
                # The site changed its logic; the line below is no longer needed
# ctrl.COOKIES.__delitem__('IS_LOGIN')
# ctrl.COOKIES.set('IS_LOGIN', 'true', domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
jsession = ctrl.COOKIES.get('JSESSIONID')
ctrl.COOKIES = resp.cookies
ctrl.COOKIES.set('JSESSIONID', jsession, domain='www.pss-system.gov.cn')
# logger.info(jsession)
update_cookies(ctrl.COOKIES)
ctrl.BEING_LOG = False
                logger.info('Login succeeded')
return True
else:
if error_times > 5:
break
                logger.error('Login failed')
error_times += 1
        except Exception:
            pass
ctrl.BEING_LOG = False
return False
|
a34ed7b8f44680b826d648193150c0387af18e7a
| 31,888 |
def logout():
"""Logout"""
next_url = request.args.get('next') if 'next' in request.args else None
# status = request.args.get('status') if 'status' in request.args else None
# client_id = request.args.get('client_id') if 'client_id' in request.args else None
logout_user()
return redirect(next_url if next_url else '/')
|
0cb55a1845386e655f313ee519a84b1ab548b971
| 31,889 |
from operator import and_
def find_all(query, model, kwargs):
"""
Returns a query object that ensures that all kwargs
are present.
:param query:
:param model:
:param kwargs:
:return:
"""
conditions = []
kwargs = filter_none(kwargs)
for attr, value in kwargs.items():
if not isinstance(value, list):
value = value.split(',')
conditions.append(get_model_column(model, attr).in_(value))
return query.filter(and_(*conditions))
|
84e24214d21cb70a590af395451f250002de21bf
| 31,890 |
def ot1dkl(x, y, M, epsilon, log=True, **kwargs):
"""General OT 2d function."""
if log:
ot = ot1dkl_log
else:
ot = ot1dkl_
output = ot(x, y, M, epsilon, **kwargs)
return output
|
6c56ca504b5cfffdc45f0a1931ae33cfe8b5fe8c
| 31,891 |
def normalize_element_without_regex(filter, field):
"""
mongomock does not support regex...
"""
if len(filter) > 0:
filters = {field: { "$in": filter}}
else:
filters = {}
    print(filters)
return filters
|
55fa5d108891819d9e8ee208b6475aed95b49b2a
| 31,892 |
def _pad_random(m, new_rows, rand):
"""Pad a matrix with additional rows filled with random values."""
rows, columns = m.shape
low, high = -1.0 / columns, 1.0 / columns
suffix = rand.uniform(low, high, (new_rows, columns)).astype(REAL)
return vstack([m, suffix])
|
9ac694ae0f8451f2b18f75acfa414f07c5a1fbf9
| 31,893 |
def quantize_sequences(sequences, alphabet):
"""Giving prescribing alphabet, quantize each caracter using index in alphabet
in each sequence.
input:
sequences: [str]
return:
[[int]]
"""
print("quantizing sequences...")
new_sequences = []
for sequence in sequences:
new_sequence = []
# add width to fit the conv2D of TF
for character in sequence.lower():
if character in alphabet:
new_sequence.append(alphabet.index(character))
else:
new_sequence.append(len(alphabet))
new_sequences.append(new_sequence)
return new_sequences
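# Usage sketch (illustrative): characters outside the alphabet map to len(alphabet).
print(quantize_sequences(["Ab!"], "abc"))  # -> [[0, 1, 3]]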
|
7b8a870d72d6b0a9568fba8d96a1d3c2e422ff59
| 31,894 |
def search(context, mpd_query):
"""
*musicpd.org, music database section:*
``search {TYPE} {WHAT} [...]``
Searches for any song that contains ``WHAT``. Parameters have the same
meaning as for ``find``, except that search is not case sensitive.
*GMPC:*
- does not add quotes around the field argument.
- uses the undocumented field ``any``.
- searches for multiple words like this::
search any "foo" any "bar" any "baz"
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the field argument.
*ncmpcpp:*
- also uses the search type "date".
- uses "file" instead of "filename".
"""
try:
query = _query_from_mpd_search_format(mpd_query)
except ValueError:
return
results = context.core.library.search(**query).get()
artists = [_artist_as_track(a) for a in _get_artists(results)]
albums = [_album_as_track(a) for a in _get_albums(results)]
tracks = _get_tracks(results)
return translator.tracks_to_mpd_format(artists + albums + tracks)
|
c24881b3298c444f6f80fe690f71ed8f504d78f1
| 31,896 |
import numpy as np
def get_camera_pose(camera_info):
"""
    :param camera_info: dict mapping each subject id to its list of camera objects
    :return: (N, 3) array of camera origins in world coordinates
"""
camera_pose = list()
for sbj in camera_info.keys():
for i, cam in enumerate(camera_info[sbj]):
camera_pose.append(cam.cam_orig_world.reshape(3))
return np.array(camera_pose)
|
a371ce035b51b8c01775152bd5bb7355040f1cb6
| 31,897 |