content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import itertools
def generate_all_specs(
population_specs, treatment_specs, outcome_specs, model_specs, estimator_specs
):
"""
Generate all combinations of population, treatment,
outcome, causal model and estimator
"""
causal_graph = CausalGraph(treatment_specs, outcome_specs, model_specs)
model_specs = causal_graph.create_gml_model_specs()
specs = itertools.product(
population_specs, treatment_specs, outcome_specs, model_specs, estimator_specs
)
return [spec for spec in specs if is_valid_spec(spec)]
|
8180bd19d87b69d346edc4fd4442430e0c951873
| 23,771 |
from math import tan  # assumed import; use numpy.tan instead if r is passed as an array
def wsFoc(r, psi, L1, z0, alpha):
"""Return optimum focal surface height at radius r
as given by Chase & Van Speybroeck
"""
return .0625*(psi+1)*(r**2*L1/z0**2)/tan(alpha)**2
|
90076856f2fbef0cea3d662d1789d8392e9b19e0
| 23,772 |
import cv2
import numpy as np
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, draw_function=draw_lines, **kwargs):
"""
`img` should be the output of a Canny transform.
draw_function: Function which accepts an image and lines to render lanes. Default: draw_lines()
Returns an image with hough lines drawn.
"""
rho = max(rho, 1)
lines = cv2.HoughLinesP(
img,
rho,
theta,
threshold,
np.array([]),
minLineLength=min_line_len,
maxLineGap=max_line_gap,
)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_function(line_img, lines, **kwargs)
return line_img
|
586de545e1ad51c5495047f5d513b17ca2f7e369
| 23,773 |
from .models import Topography
def instances_to_topographies(topographies, surfaces, tags):
"""Returns a queryset of topographies, based on given instances
Given topographies, surfaces and tags are resolved and
all topographies are returned which are either
- explicitly given
- given indirectly by a surface
- given indirectly by a tag, if the topography is tagged accordingly
- given indirectly by a tag, if its surface is tagged accordingly
Parameters
----------
topographies: sequence of topographies
surfaces: sequence of surfaces
tags: sequence of tags
Returns
-------
Queryset of topographies, distinct, ordered by id
"""
topography_ids = [topo.id for topo in topographies]
surface_ids = [s.id for s in surfaces]
tag_ids = [tag.id for tag in tags]
topographies = Topography.objects.filter(id__in=topography_ids)
topographies |= Topography.objects.filter(surface__in=surface_ids)
topographies |= Topography.objects.filter(surface__tags__in=tag_ids)
topographies |= Topography.objects.filter(tags__in=tag_ids)
return topographies.distinct().order_by('id')
|
a5d94de84046a7218f92fb3f75320b7f78bde446
| 23,774 |
from typing import Optional
from typing import Union
from typing import Tuple
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import axes
from matplotlib.ticker import FormatStrFormatter
def plot_histogram(
s: pd.Series,
*,
number_bins: Optional[int] = None,
bin_range: Optional[Tuple[int, int]] = None,
figsize: Optional[Tuple[int, int]] = (8, 6),
bin_width: Optional[int] = None,
edgecolor: Optional[str] = '#ffffff',
linewidth: Optional[int] = 1,
bin_label_bool: Optional[bool] = False,
color: Optional[str] = '#0077bb'
) -> Tuple[plt.Figure, axes.Axes]:
"""
Parameters
----------
s : pd.Series
The input series.
number_bins : Optional[int] = None
The number of equal-width bins in the range s.max() - s.min().
bin_range : Optional[Tuple[int, int]] = None,
The lower and upper range of the bins. If not provided, range is
(s.min(), s.max()).
figsize : Optional[Tuple[int, int]] = (8, 6),
The figure size width, height (inch).
bin_width : Optional[int] = None,
The width of the bin in same units as the series s.
edgecolor : Optional[str] = '#ffffff',
The hexadecimal color value for the bar edges.
linewidth : Optional[int] = 1,
The bar edges line width (point).
bin_label_bool : Optional[bool] = False
If True, label the bars with count and percentage of total.
color : Optional[str] = '#0077bb'
The color of the bar faces.
Returns
-------
fig, ax : Tuple[plt.Figure, axes.Axes]
Examples
--------
Example 1
# Create a series of random floats, normal distribution,
# with the default parameters.
>>> import datasense as ds
>>> s = ds.random_data()
>>> fig, ax = ds.plot_histogram(s=s)
Example 2
# Create a series of random integers, integer distribution, size = 113,
# min = 0, max = 13.
>>> import datasense as ds
>>> s = ds.random_data(
>>> distribution='randint',
>>> size=113,
>>> low=0,
>>> high=14
>>> )
>>> fig, ax = ds.plot_histogram(s=s)
Example 3
# Create a series of random integers, integer distribution, size = 113,
# min = 0, max = 13.
# Set histogram parameters to control bin width.
>>> s = ds.random_data(
>>> distribution='randint',
>>> size=113,
>>> low=0,
>>> high=14
>>> )
>>> fig, ax = ds.plot_histogram(
>>> s=s,
>>> bin_width=1
>>> )
Example 4
# Create a series of random integers, integer distribution, size = 113,
# min = 0, high = 13.
# Set histogram parameters to control bin width and plotting range.
>>> s = ds.random_data(
>>> distribution='randint',
>>> size=113,
>>> low=0,
>>> high=13
>>> )
>>> fig, ax = ds.plot_histogram(
>>> s=s,
>>> bin_width=1,
>>> bin_range=(0, 10)
>>> )
Example 5
# Create a series of random floats, size = 113,
# average = 69, standard deviation = 13.
# Set histogram parameters to control bin width and plotting range.
>>> s = ds.random_data(
>>> distribution='norm',
>>> size=113,
>>> loc=69,
>>> scale=13
>>> )
>>> fig, ax = ds.plot_histogram(
>>> s=s,
>>> bin_width=5,
>>> bin_range=(30, 110)
>>> )
Example 6
# Create a series of random floats, size = 113,
# average = 69, standard deviation = 13.
# Set histogram parameters to control bin width, plotting range, labels.
# Set colour of the bars.
>>> s = ds.random_data(
>>> distribution='norm',
>>> size=113,
>>> loc=69,
>>> scale=13
>>> )
>>> fig, ax = ds.plot_histogram(
>>> s=s,
>>> bin_width=5,
>>> bin_range=(30, 110),
>>> figsize=(10,8),
>>> bin_label_bool=True,
>>> color='#33bbee'
>>> )
"""
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
if bin_width and not bin_range:
x = (s.max() - s.min()) / bin_width
number_bins = math.ceil(x)
elif bin_width and bin_range:
number_bins = int((bin_range[1] - bin_range[0]) / bin_width)
counts, bins, patches = ax.hist(
x=s,
bins=number_bins,
range=bin_range,
edgecolor=edgecolor,
linewidth=linewidth,
color=color
)
if bin_label_bool:
ax.set_xticks(bins)
ax.xaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
bin_centers = 0.5 * np.diff(bins) + bins[:-1]
for count, x in zip(counts, bin_centers):
ax.annotate(
text=f'{str(int(count))}',
xy=(x, 0),
xytext=(0, -18),
xycoords=(
'data',
'axes fraction'
),
textcoords='offset points',
va='top',
ha='center'
)
percent = f'{(100 * float(count) / counts.sum()):0.0f} %'
ax.annotate(
text=percent,
xy=(x, 0),
xytext=(0, -32),
xycoords=(
'data',
'axes fraction'
),
textcoords='offset points',
va='top',
ha='center'
)
return (fig, ax)
|
3f51a9abbf8dde862e18bb21b82f89c34dd6a536
| 23,775 |
def get_tracks():
"""
Returns all tracks on the minerva DB
"""
# connect to the database
db = connect_minerva_db()
# return all the tracks as a list
tracks = list(db.tracks.find())
return tracks
|
65eedeaf32f448a6c32f8c77476dbea6b55a55b0
| 23,776 |
def mult_pair(pair):
"""Return the product of two, potentially large, numbers."""
return pair[0]*pair[1]
|
b616a0fb706eec5ca8723aa05c273ece079a2350
| 23,777 |
def get_only_filename(file_list):
"""
Get the filename (without extension) from each file path and return the list of filenames.
Input:
file_list: List. file's paths list.
Attribute:
file_name: String. "01.jpg"
file_name_without_ext: String. "01"
Return:
filename_list: Only filename list.
"""
filename_list = list()
for file_path in file_list:
file_name = file_path.split("/")[-1]
file_name_without_ext = file_name.split(".")[0]
filename_list.append(file_name_without_ext)
return filename_list
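# Hedged usage sketch (illustrative addition, not part of the original snippet);
# assumes POSIX-style paths, since the function splits on "/".
example_paths = ["images/train/01.jpg", "images/train/02.png"]
assert get_only_filename(example_paths) == ["01", "02"]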
|
3b9b202a4320825eba9d32170f527c0de6e1bdc6
| 23,778 |
import _ctypes
def simple_calculate_hmac(sym_key, message,
digest_algo=DIGEST_ALGORITHM.SHA256):
"""Calculates a HMAC of given message using symmetric key."""
message_param = _get_char_param_nullify_if_zero(message)
mac = _ctypes.POINTER(_ctypes.c_char)()
mac_length = _ctypes.c_size_t()
_lib.yaca_simple_calculate_hmac(digest_algo.value, sym_key,
message_param, len(message),
_ctypes.byref(mac),
_ctypes.byref(mac_length))
mac_bytes = mac[:mac_length.value]
_lib.yaca_free(mac)
return mac_bytes
|
242f703d062366828f6980d90901a1b803fc426a
| 23,779 |
import numpy as np
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
_log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
|
88361b30137ce9ca646e49b1865d79b65f2693aa
| 23,780 |
def stat_helper(path):
"""os.path.exists will return None for PermissionError (or any other
exception) , leading us to believe a file is not present when it, in fact,
is. This behavior is awful, so stat_helper preserves any exception
other than FileNotFoundError.
"""
try:
return path.stat()
except FileNotFoundError:
return None
|
32e0863489ca19b55203d31b141d837189655cc2
| 23,782 |
from typing import Tuple
import datetime
def create_beacon_and_now_datetime(
game_name: str = "st",
waiting_time: float = 12.0,
platform_name: str = "pc"
) -> Tuple[beacons.BeaconBase, datetime.datetime]:
"""Return a BeaconBase instance with start time to current time."""
now = datetime.datetime.now(datetime.timezone.utc) \
.replace(microsecond=0)
beacon = create_beacon(game_name=game_name,
waiting_time=waiting_time,
platform_name=platform_name,
start=now)
return beacon, now
|
cff66d951e2b488a0c8ecd61f6ce5bdbeddae4f7
| 23,783 |
def validate(number, check_country=True):
"""Checks to see if the number provided is a valid IBAN. The country-
specific check can be disabled with the check_country argument."""
number = compact(number)
# ensure that checksum is valid
mod_97_10.validate(number[4:] + number[:4])
# look up the number
info = _ibandb.info(number)
# check if the bban part of number has the correct structure
bban = number[4:]
if not _struct_to_re(info[0][1].get('bban', '')).match(bban):
raise InvalidFormat()
# check the country-specific module if it exists
if check_country:
module = _get_cc_module(number[:2])
if module:
module.validate(number)
# return the compact representation
return number
|
55ee5423ff025ab9e4332d099e5c2d7b695163dd
| 23,784 |
from numpy import array
def read_group(fname):
"""Reads the symmetry group in from the 'rot_perms' styled group
output by enum.x.
:arg fname: path to the file to read the group from.
"""
i=0
groupi = []
with open(fname) as f:
for line in f:
if i > 5:
if ('Perm #:') in line:
groupi.append(list(map(int, line.split()[4::])))
else:
groupi[-1] += list(map(int, line.split()))
i += 1
return(list(map(list, array(groupi)-1)))
|
7971781ae157c94329c638d4afd51a871b39498f
| 23,785 |
def find_last_layer(model):
"""
Find the last layer of a model.
Args:
model: Model (e.g. a Keras model exposing ``model.layers``).
Returns:
The last layer of the model.
"""
for layer in reversed(model.layers):
return layer
|
ff82705e4a74d7ad15b3d0e3e030c340b49052ca
| 23,787 |
def seconds_to_time(sec):
"""
Convert seconds into M:S (minutes:seconds) format
"""
return "%02d:%02d" % divmod(sec, 60)
|
5fe639a9a6ade59258dfb2b3df8426c7e79d19fa
| 23,788 |
def _compute_nfp_real(l, u, counts, sizes):
"""Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], using the real
set size distribution.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
"""
if l > u:
raise ValueError("l must be less or equal to u")
return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])
|
40abf796ce116a92cd89c813a6343c917e413707
| 23,789 |
from typing import Union
from typing import Dict
from typing import List
from typing import Set
def is_equal_subset(
subset: Union[Dict, List, Set], superset: Union[Dict, List, Set]
) -> bool:
"""determine if all shared keys have equal value"""
if isinstance(subset, dict):
return all(
key in superset and is_equal_subset(val, superset[key])
for key, val in subset.items()
)
if isinstance(subset, list) or isinstance(subset, set):
return all(
any(is_equal_subset(subitem, superitem) for superitem in superset)
for subitem in subset
)
# assume that subset is a plain value if none of the above match
return subset == superset
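# Hedged usage sketch (illustrative addition): shared keys must match, extra
# keys in the superset are ignored.
assert is_equal_subset({"a": 1}, {"a": 1, "b": 2})
assert not is_equal_subset({"a": 1}, {"a": 2})
assert is_equal_subset([1, 2], [2, 1, 3])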
|
4c2edbc73c350783d795ee0aa6e12180642e205c
| 23,790 |
from copy import copy
from decorator import decorator  # assumed: the `decorator` package supplies @decorator
import itertools
def concatenate_over(argname):
"""Decorator to "vectorize" functions and concatenate outputs
"""
def _prepare_args(arg_map, value):
params = copy(arg_map)
params[argname] = value
return params
@decorator
def _concatenate_over(func, *args, **kwargs):
"""Validate that an agument is a proportion [0, 1.0]
"""
arg_map = map_parameters_in_fn_call(args, kwargs, func)
value = arg_map.get(argname)
if isinstance(value, list):
return list(
itertools.chain.from_iterable(
func(**_prepare_args(arg_map, v)) for v in value))
else:
return func(**arg_map)
return _concatenate_over
|
8bdf286566409bc6f8f97e06f5800e495afbc042
| 23,791 |
import ctypes
import ctypes.wintypes
def logicalToPhysicalPoint(window, x, y):
"""Converts the logical coordinates of a point in a window to physical coordinates.
This should be used when points are received directly from a window that is not DPI aware.
@param window: The window handle.
@param x: The logical x coordinate.
@type x: int
@param y: The logical y coordinate.
@type y: int
@return: The physical x and y coordinates.
@rtype: tuple of (int, int)
"""
if not _logicalToPhysicalPoint:
return x, y
point = ctypes.wintypes.POINT(x, y)
_logicalToPhysicalPoint(window, ctypes.byref(point))
return point.x, point.y
|
81aeadcef460ffe1be64a69e64b562cea5dc94d6
| 23,793 |
import numba
import numpy as np
def _node2vec_walks(Tdata, Tindptr, Tindices,
sampling_nodes,
walklen,
return_weight,
neighbor_weight):
"""
Create biased random walks from the transition matrix of a graph
in CSR sparse format. Bias method comes from Node2Vec paper.
Parameters
----------
Tdata : 1d np.array
CSR data vector from a sparse matrix. Can be accessed by M.data
Tindptr : 1d np.array
CSR index pointer vector from a sparse matrix.
Can be accessed by M.indptr
Tindices : 1d np.array
CSR column vector from a sparse matrix.
Can be accessed by M.indices
sampling_nodes : 1d np.array of int
List of node IDs to start random walks from.
Is generally equal to np.arange(n_nodes) repeated for each epoch
walklen : int
length of the random walks
return_weight : float in (0, inf]
Weight on the probability of returning to the node we came from.
Having this higher tends the walks to be
more like a Breadth-First Search.
Having this very high (> 2) makes search very local.
Equal to the inverse of p in the Node2Vec paper.
neighbor_weight : float in (0, inf]
Weight on the probability of visiting a neighbor node
of the one we're coming from in the random walk.
Having this higher tends the walks to be
more like a Depth-First Search.
Having this very high makes search more outward.
Having this very low makes search very local.
Equal to the inverse of q in the Node2Vec paper.
Returns
-------
out : 2d np.array (n_walks, walklen)
A matrix where each row is a biased random walk,
and each entry is the ID of the node
"""
n_walks = len(sampling_nodes)
res = np.empty((n_walks, walklen), dtype=Tindices.dtype)
for i in numba.prange(n_walks):
# Current node (each element is one walk's state)
state = sampling_nodes[i]
res[i, 0] = state
# Do one normal step first
state = _node2vec_first_step(state, Tdata, Tindices, Tindptr)
for k in range(1, walklen-1):
# Write state
res[i, k] = state
state = _node2vec_inner(
res, i, k, state,
Tdata, Tindices, Tindptr,
return_weight, neighbor_weight
)
# Write final states
res[i, -1] = state
return res
|
1a6ec24c62168f905a22809fc411036ab9f83b57
| 23,794 |
def schedule_prettify(schedule):
"""
Takes a schedule entry in the format:
[day of week, time, class type, class name, teacher name, location]
For example: ['Чт', '13:00 – 14:30', 'ПЗ', 'Физическая культура', '', 'Кафедра']
"""
if not schedule:
return 'Сегодня занятий нету'
else:
bot_message = ''
time = '⌚ ' + schedule[1] + '\n'
if schedule[2]:
schedule_type = schedule[2]
else:
schedule_type = ''
if schedule[3]:
subject = '📝 ' + schedule[-3] + '\n'
else:
subject = '📝 ' + schedule_type + '\n'
if schedule[4]:
teacher = '👤 ' + schedule[4] + '\n'
else:
teacher = ''
if schedule[5]:
location = '📍 ' + schedule[5] + '\n'
else:
location = ''
bot_message += teacher + subject + time + location + '\n'
return bot_message
|
868469b99bb68ec407f6861e12d063bcd6b56236
| 23,795 |
def autodelegate(prefix=''):
"""
Returns a method that takes one argument and calls the method named prefix+arg,
calling `notfound()` if there isn't one. Example:
urls = ('/prefs/(.*)', 'prefs')
class prefs:
GET = autodelegate('GET_')
def GET_password(self): pass
def GET_privacy(self): pass
`GET_password` would get called for `/prefs/password` while `GET_privacy`
gets called for `/prefs/privacy`.
If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
is called.
"""
def internal(self, arg):
if '/' in arg:
first, rest = arg.split('/', 1)
func = prefix + first
args = ['/' + rest]
else:
func = prefix + arg
args = []
if hasattr(self, func):
try:
return getattr(self, func)(*args)
except TypeError:
return notfound()
else:
return notfound()
return internal
|
8ea5f555c3b102fc1830a4c616bd71f2dbf98ce4
| 23,796 |
import tensorflow as tf
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
# Scale factor such that maximal dimension is at most max_dimension
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# if this factor is less than 1 we have to act!
# Scale factor such that minimal dimension is at least min_dimension
orig_min_dim = min(orig_height, orig_width)
large_scale_factor = min_dimension / float(orig_min_dim)
# If image is already big enough... do nothing
large_scale_factor = max(large_scale_factor, 1.0)
# Take the minimum (we ensure that maxdim is not exceeded and if possible min_dim is met also)
scale_factor = min(small_scale_factor, large_scale_factor)
new_height = int(round(orig_height * scale_factor))
new_width = int(round(orig_width * scale_factor))
new_size = [new_height, new_width]
return tf.constant(new_size + [num_channels])
|
1cc3a3465f69a8c799ccc529ba95efba0319fdf0
| 23,797 |
def deepupdate(original, update):
"""
Recursively update a dict.
Sub-dicts won't be overwritten; they are updated recursively as well.
"""
for key, value in original.items():
if key not in update:
update[key] = value
elif isinstance(value, dict):
deepupdate(value, update[key])
return update
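# Hedged usage sketch (illustrative addition): keys missing from `update` are
# copied over from `original`; nested dicts are merged recursively.
merged = deepupdate({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
assert merged == {"a": {"b": 1, "c": 2}, "d": 3}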
|
fc06aded11a674a0c5815a6f365ff790506af362
| 23,798 |
import itertools
def _omega_spectrum_odd_c(n, field):
"""Spectra of groups \Omega_{2n+1}(q) for odd q.
[1, Corollary 6]
"""
n = (n - 1) // 2
q = field.order
p = field.char
# (1)
t = (q ** n - 1) // 2
a1 = [t, t + 1]
# (2)
a2 = SemisimpleElements(q, n, min_length=2)
# (3)
k = 1
a3 = []
while True:
n1 = n - (p ** (k - 1) + 1) // 2
if n1 < 1:
break
t = (q ** n1 - 1) // 2
a3.extend([t * p ** k, (t + 1) * p ** k])
k += 1
# (4)
a4 = MixedElements(q, n,
lambda k: (p ** (k - 1) + 1) // 2,
lambda k: p ** k, min_length=2)
# (5)
k = numeric.get_exponent(2 * n - 1, p)
a5 = [] if k is None else [p * (2 * n - 1)]
return itertools.chain(a1, a2, a3, a4, a5)
|
02512e8368ce1ef048ce0bfa8380ff7e102cdff7
| 23,799 |
def get_service(hass, config, discovery_info=None):
"""Get the ClickSend notification service."""
if not _authenticate(config):
_LOGGER.error("You are not authorized to access ClickSend")
return None
return ClicksendNotificationService(config)
|
4b3dd52d7ebcb37012bc847288d0ea38c4ca91f6
| 23,800 |
import numpy as np
import pandas as pd
from faker import Faker
def mock_features_dtypes(num_rows=100):
"""Internal function that returns the default full dataset.
:param num_rows: The number of observations in the final dataset. Defaults to 100.
:type num_rows: int, optional
:return: A dict mapping column names to the generated column data.
:rtype: dict
"""
fake = Faker()
def _remove_x_from_number(phone):
if "x" in phone:
phone = phone[: phone.find("x")]
return phone
phone_numbers = pd.Series([fake.phone_number() for _ in range(num_rows)])
phone_numbers = phone_numbers.apply(_remove_x_from_number)
def _remove_newline_from_address(address):
address = address.replace("\n", ", ")
return address
addresses = pd.Series([fake.address() for _ in range(num_rows)])
addresses = addresses.apply(_remove_newline_from_address)
dtypes_dict = {
"ints": [i for i in range(-num_rows // 2, num_rows // 2)],
"rand_ints": np.random.choice([i for i in range(-5, 5)], num_rows),
"floats": [float(i) for i in range(-num_rows // 2, num_rows // 2)],
"rand_floats": np.random.uniform(low=-5.0, high=5.0, size=num_rows),
"booleans": np.random.choice([True, False], num_rows),
"categoricals": np.random.choice(
["First", "Second", "Third", "Fourth"], num_rows
),
"dates": pd.date_range("1/1/2001", periods=num_rows),
"texts": [
f"My children are miserable failures, all {i} of them!"
for i in range(num_rows)
],
"ints_nullable": np.random.choice(
[i for i in range(-10 // 2, 10 // 2)] + [pd.NA], num_rows
),
"floats_nullable": np.random.choice(
np.append([float(i) for i in range(-5, 5)], pd.NA), num_rows
),
"booleans_nullable": np.random.choice([True, False, None], num_rows),
"full_names": pd.Series([fake.name() for _ in range(num_rows)]),
"phone_numbers": phone_numbers,
"addresses": addresses,
"countries": pd.Series([fake.country() for _ in range(num_rows)]),
"email_addresses": pd.Series(
[fake.ascii_free_email() for _ in range(num_rows)]
),
"urls": pd.Series([fake.url() for _ in range(num_rows)]),
"currencies": pd.Series([fake.pricetag() for _ in range(num_rows)]),
"file_paths": pd.Series([fake.file_path(depth=3) for _ in range(num_rows)]),
"ipv4": pd.Series([fake.ipv4() for _ in range(num_rows)]),
"ipv6": pd.Series([fake.ipv6() for _ in range(num_rows)]),
"lat_longs": pd.Series([fake.latlng() for _ in range(num_rows)]),
}
return dtypes_dict
|
c9d9bef26d908b2e47d4bc2e013f0c29e328b2b3
| 23,801 |
import numpy as _np
def random_bitstring(n, p, failcount=0):
"""
Constructs a random bitstring of length n with parity p
Parameters
----------
n : int
Number of bits.
p : int
Parity.
failcount : int, optional
Internal use only.
Returns
-------
numpy.ndarray
"""
bitstring = _np.random.randint(0, 2, size=n)
if _np.mod(sum(bitstring), 2) == p:
return bitstring
elif failcount < 100:
return _np.array(random_bitstring(n, p, failcount + 1), dtype='int')
|
07637061e50bc1fe853aeb2eef19505ee1a6b612
| 23,802 |
import json
def serializer(message):
"""serializes the message as JSON"""
return json.dumps(message).encode('utf-8')
|
7e8d9ae8e31653aad594a81e9f45170a915e291d
| 23,803 |
def send_mail(request, format=None):
"""
Send mail to admin
"""
# serialize request data
serializer = MailSerializer(data=request.data)
if serializer.is_valid():
try:
# create data for mail
subject = settings.EMAIL_SUBJECT.format(
first_name=request.data["first_name"],
last_name=request.data["last_name"],
)
msg = request.data["message"]
email_from = request.data["email_from"]
# send mail
EmailMessage(subject, msg, email_from, [settings.EMAIL_TO]).send()
# save mail instance
serializer.save(
owner=request.user,
email_to=settings.EMAIL_TO,
host_ip=request.META["REMOTE_ADDR"],
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception:
pass
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
02ac2d0f7bf76fcea0afcff0a2a690cf72836e08
| 23,804 |
def correct_name(name):
"""
Ensures that the name of an object used to create paths in the file system does not
contain characters that would be handled erroneously (e.g. \\ or / which
normally separate file directories).
Parameters
----------
name : str
Name of object (course, file, folder, etc.) to correct
Returns
-------
corrected_name
Corrected name
"""
corrected_name = name.replace(" ", "_")
corrected_name = corrected_name.replace("\\", "_")
corrected_name = corrected_name.replace("/", "_")
corrected_name = corrected_name.replace(":", "_")
return corrected_name
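# Hedged usage sketch (illustrative addition): spaces, slashes and colons all
# become underscores.
assert correct_name("Intro to CS: Lecture 1/2") == "Intro_to_CS__Lecture_1_2"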
|
b1df7a503324009a15f4f08e7641722d15a826b7
| 23,805 |
def runner(parallel, config):
"""Run functions, provided by string name, on multiple cores on the current machine.
"""
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
logger.info("multiprocessing: %s" % fn_name)
fn = get_fn(fn_name, parallel)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel
|
66ba2c5cd57d4d2738d9dd3d57ca9d5daca2ec8d
| 23,806 |
def hsv_to_hsl(hsv):
"""
HSV to HSL.
https://en.wikipedia.org/wiki/HSL_and_HSV#Interconversion
"""
h, s, v = hsv
s /= 100.0
v /= 100.0
l = v * (1.0 - s / 2.0)
return [
HSV._constrain_hue(h),
0.0 if (l == 0.0 or l == 1.0) else ((v - l) / min(l, 1.0 - l)) * 100,
l * 100
]
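# Hedged worked check (illustrative addition), reproducing the formula above for
# pure red without the HSV._constrain_hue helper (assumed to leave 0 unchanged):
# HSV (0, 100, 100) maps to HSL (0, 100, 50).
h, s, v = 0.0, 100.0 / 100.0, 100.0 / 100.0
l = v * (1.0 - s / 2.0)
assert (l * 100, ((v - l) / min(l, 1.0 - l)) * 100) == (50.0, 100.0)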
|
4fee4508f6db770265ef46e928cfed6dee094892
| 23,807 |
from .._mesh import Mesh
def reindex_faces(mesh, ordering):
"""
Reorder the faces of the given mesh, returning a new mesh.
Args:
mesh (lacecore.Mesh): The mesh on which to operate.
ordering (np.arraylike): An array specifying the order in which
the original faces should be arranged.
Returns:
lacecore.Mesh: The reindexed mesh.
"""
vg.shape.check(locals(), "ordering", (mesh.num_f,))
unique_values = np.unique(ordering)
if not np.array_equal(unique_values, np.arange(mesh.num_f)):
raise ValueError(
"Expected new face indices to be unique, and range from 0 to {}".format(
mesh.num_f - 1
)
)
return Mesh(
v=mesh.v,
f=mesh.f[ordering],
face_groups=None
if mesh.face_groups is None
else mesh.face_groups.reindexed(ordering),
)
|
254cf3a036fa92253b105f3acd93dfe26d33a61c
| 23,809 |
import re
def check_exact_match(line, expected_line):
"""
Uses regular expressions to find an exact (not partial) match for 'expected_line' in 'line', i.e.
in the example below it matches 'foo' and succeeds:
line value: '66118.999958 - INFO - [MainThread] - ly_test_tools.o3de.asset_processor - foo'
expected_line: 'foo'
:param line: The log line string to search,
i.e. '9189.9998188 - INFO - [MainThread] - example.tests.test_system_example - Log Monitoring test 1'
:param expected_line: The exact string to match when searching the line param,
i.e. 'Log Monitoring test 1'
:return: An exact match for the string if one is found, None otherwise.
"""
# Look for either start of line or whitespace, then the expected_line, then either end of the line or whitespace.
# This way we don't partial match inside of a string. So for example, 'foo' matches 'foo bar' but not 'foobar'
regex_pattern = re.compile("(^|\\s){}($|\\s)".format(re.escape(expected_line)), re.UNICODE)
if regex_pattern.search(line) is not None:
return expected_line
return None
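# Hedged usage sketch (illustrative addition): whole-word matches succeed,
# partial matches do not.
assert check_exact_match("foo bar", "foo") == "foo"
assert check_exact_match("foobar", "foo") is None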
|
d01eaa13c40d66999e870d3b287ac869f64ae314
| 23,810 |
def rounding_filters(filters, w_multiplier):
""" Calculate and round number of filters based on width multiplier. """
if not w_multiplier:
return filters
divisor = 8
filters *= w_multiplier
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
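# Hedged worked example (illustrative addition): with a width multiplier of 1.2,
# 32 filters scale to 38.4 and round to the nearest multiple of 8, giving 40.
assert rounding_filters(32, 1.2) == 40
# A falsy multiplier leaves the filter count untouched.
assert rounding_filters(32, None) == 32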
|
eb2938732792564fd324602fd74be41e6f88b265
| 23,811 |
def process(request, service, identifier):
"""
View that displays a detailed description for a WPS process.
"""
wps = get_wps_service_engine(service)
wps_process = wps.describeprocess(identifier)
context = {'process': wps_process,
'service': service,
'is_link': abstract_is_link(wps_process)}
return render(request, 'tethys_wps/process.html', context)
|
0d9f5a0cdf7c15470547ff82ff3534cf6f624960
| 23,812 |
def update(pipeline_id, name, description):
"""Submits a request to CARROT's pipelines update mapping"""
# Create parameter list
params = [
("name", name),
("description", description),
]
return request_handler.update("pipelines", pipeline_id, params)
|
e808fb0fc313e8a5e51bb448d0aca68e389bfc30
| 23,813 |
from re import S
def symmetric_poly(n, *gens, **args):
"""Generates symmetric polynomial of order `n`. """
gens = _analyze_gens(gens)
if n < 0 or n > len(gens) or not gens:
raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
elif not n:
poly = S.One
else:
poly = Add(*[ Mul(*s) for s in subsets(gens, int(n)) ])
if not args.get('polys', False):
return poly
else:
return Poly(poly, *gens)
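# Hedged usage sketch (illustrative addition), assuming the SymPy helpers used
# above (_analyze_gens, subsets, Add, Mul, Poly, S) are in scope as in SymPy:
# >>> from sympy import symbols
# >>> x, y, z = symbols('x y z')
# >>> symmetric_poly(2, x, y, z)
# x*y + x*z + y*z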
|
51177adf8873628669c1b75267c3c65821920044
| 23,814 |
from typing import Any
def load(name: str, *args, **kwargs) -> Any:
"""
Loads the unit specified by `name`, initialized with the given arguments
and keyword arguments.
"""
entry = get_entry_point(name)
return entry.assemble(*args, **kwargs)
|
e924dcebf082443fb9f36cd81302cb62ac53775d
| 23,815 |
from typing import List
def get_gate_names_2qubit() -> List[str]:
"""Return the list of valid gate names of 2-qubit gates."""
names = []
names.append("cx")
names.append("cz")
names.append("swap")
names.append("zx90")
names.append("zz90")
return names
|
d3d7f20263805a186d9142ec087039eb53076346
| 23,816 |
import numpy as np
def positive_leading_quat(quat):
"""Returns the positive leading version of the quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
The equivalent quaternion [w, i, j, k] with w > 0.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
quat = np.where(np.tile(quat[..., 0:1] < 0, quat.shape[-1]), -quat, quat)
return quat
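# Hedged usage sketch (illustrative addition): a quaternion with a negative
# leading component is flipped to its equivalent positive-leading form.
flipped = positive_leading_quat([-1.0, 0.0, 0.0, 0.0])
assert flipped[0] == 1.0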
|
e0dce2e8fce42a15abdeeccc4bbf63c9e9241cf1
| 23,817 |
import numpy as np
import pandas as pd
from sklearn import metrics
def comp_avg_silh_metric(data_input, cluster_indices, silh_max_samples, silh_distance):
"""
Given a input data matrix and an array of cluster indices, returns the
average silhouette metric for that clustering result (computed across all clusters).
Parameters
----------
data_input : ndarray
Data to be clustered (each row contains a n-dimensional data sample)
cluster_indices : list
List containing for each data point (each row in data input) its cluster id
silh_max_samples: int
Maximum number of samples to compute the silhouette metric (higher for
more exact values at higher computing costs)
silh_distance: string
Metric to use when calculating distance between instances
e.g. 'euclidean', 'manhattan', 'cosine'
Returns
-------
avg_silhouette : float
Silhouette metric averaged across all clusters
"""
# Sample data for computing the silhouette metric
input_data_x_sample = None
cluster_indices_sample = None
for curr_cluster_id in set(cluster_indices):
list_occurrences = [
i for i, x in enumerate(cluster_indices) if x == curr_cluster_id
]
if input_data_x_sample is None:
input_data_x_sample = data_input[list_occurrences[0:silh_max_samples]]
else:
input_data_x_sample = np.vstack(
(
input_data_x_sample,
data_input[list_occurrences[0:silh_max_samples]],
)
)
if cluster_indices_sample is None:
cluster_indices_sample = np.array(cluster_indices)[
list_occurrences[0:silh_max_samples]
]
else:
cluster_indices_sample = np.hstack(
(
cluster_indices_sample,
np.array(cluster_indices)[list_occurrences[0:silh_max_samples]],
)
)
# Compute mean silhouette for each class and the average across all classes
try:
silh_array = metrics.silhouette_samples(
input_data_x_sample,
np.asarray(cluster_indices_sample),
metric=silh_distance,
)
np_silh_samples = np.column_stack((cluster_indices_sample, silh_array.tolist()))
df_silh_samples = pd.DataFrame(
data=np_silh_samples[0:, 0:], columns=["cluster_id", "silhouette"]
)
df_silh_mean_per_class = df_silh_samples.groupby(
["cluster_id"]
).mean() # .sort_values(by='cluster_id')
df_silh_mean_per_class.reset_index(level=0, inplace=True)
df_silh_mean_per_class.sort_values(by="cluster_id")
avg_silhouette = df_silh_mean_per_class["silhouette"].mean()
except ValueError:
avg_silhouette = np.nan
return avg_silhouette
|
0d2cb42b1f0b354f4776c9c2064d23ed4b8f0b75
| 23,818 |
def prepare_url(base_url, path, url_params=None):
"""Prepare url from path and params"""
if url_params is None:
url_params = {}
url = '{0}{1}'.format(base_url, path)
if not url.endswith('/'):
url += '/'
url_params_str = urlencode(url_params)
if url_params_str:
url += '?' + url_params_str
return url
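# Hedged usage sketch (illustrative addition); `urlencode` is assumed to come
# from urllib.parse in the surrounding module.
assert prepare_url("https://api.example.com", "/v1/items") == "https://api.example.com/v1/items/"
assert prepare_url("https://api.example.com", "/v1/items", {"page": 2}) == "https://api.example.com/v1/items/?page=2"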
|
0a447d9f340a4ea9c99b98ca1e6f778f907d8a3d
| 23,819 |
import xarray as xr
def extract_at_interval(da: xr.DataArray, interval) -> xr.DataArray:
"""Reduce size of an Error Grid by selecting data at a fixed interval along
both the number of high- and low-fidelity samples.
"""
return da.where(
da.n_high.isin(da.n_high[slice(None, None, interval)]) *
da.n_low.isin(da.n_low[slice(None, None, interval)])
)
|
cad3dce9850edbad9decadbc37e0372001b8ecc9
| 23,820 |
def compute_log_zT_var(log_rho_var, log_seebeck_sqr_var, log_kappa_var):
"""Compute the variance of the logarithmic thermoelectric figure
of merit zT.
"""
return log_rho_var + log_seebeck_sqr_var + log_kappa_var
|
3528181796aeafb3df5eac09b06852afe028cb13
| 23,821 |
def main(global_config, **settings):
""" Very basic pyramid app """
config = Configurator(settings=settings)
config.include('pyramid_swagger')
config.add_route(
'sample_nonstring',
'/sample/nonstring/{int_arg}/{float_arg}/{boolean_arg}',
)
config.add_route('standard', '/sample/{path_arg}/resource')
config.add_route('get_with_non_string_query_args', '/get_with_non_string_query_args')
config.add_route('post_with_primitive_body', '/post_with_primitive_body')
config.add_route('post_with_form_params', '/post_with_form_params')
config.add_route('post_with_file_upload', '/post_with_file_upload')
config.add_route('sample_post', '/sample')
config.include(include_samples, route_prefix='/sample')
config.add_route('throw_400', '/throw_400')
config.add_route('swagger_undefined', '/undefined/path')
config.add_route('echo_date', '/echo_date')
config.add_route('echo_date_json_renderer', '/echo_date_json_renderer')
config.add_route('post_endpoint_with_optional_body', '/post_endpoint_with_optional_body')
config.scan()
return config.make_wsgi_app()
|
677187e63b6b885f5dc27850039a54b7510ed9cf
| 23,822 |
def get_name():
"""MUST HAVE FUNCTION! Returns plugin name."""
return "ASP.NET MVC"
|
08a8b413ad1c86c270c79da245f0718aa22883a8
| 23,823 |
import numpy as np
import tensorflow as tf
def load_CIFAR(model_mode):
"""
Loads the CIFAR-100 dataset and splits it into disjoint subsets for the Target Model and the Shadow Model.
:param model_mode: one of "TargetModel" and "ShadowModel".
:return: Tuple of numpy arrays: '(x_train, y_train), (x_test, y_test), member'.
:raise: ValueError: in case of invalid `model_mode`.
"""
if model_mode not in ['TargetModel', 'ShadowModel']:
raise ValueError('model_mode must be one of TargetModel, ShadowModel.')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(label_mode='fine')
if model_mode == "TargetModel":
(x_train, y_train), (x_test, y_test) = (x_train[40000:50000], y_train[40000:50000]), \
(x_test, y_test)
elif model_mode == "ShadowModel":
(x_train, y_train), (x_test, y_test) = (x_train[:10000], y_train[:10000]), \
(x_train[10000:20000], y_train[10000:20000])
y_train = tf.keras.utils.to_categorical(y_train, num_classes=100)
m_train = np.ones(y_train.shape[0])
y_test = tf.keras.utils.to_categorical(y_test, num_classes=100)
m_test = np.zeros(y_test.shape[0])
member = np.r_[m_train, m_test]
return (x_train, y_train), (x_test, y_test), member
|
d47d5546c26b1a776b84acb407782837170da43d
| 23,825 |
def _jbackslashreplace_error_handler(err):
"""
Encoding error handler which replaces invalid characters with Java-compliant Unicode escape sequences.
:param err: An `:exc:UnicodeEncodeError` instance.
:return: See https://docs.python.org/2/library/codecs.html?highlight=codecs#codecs.register_error
"""
if not isinstance(err, UnicodeEncodeError):
raise err
return _escape_non_ascii(err.object[err.start:err.end]), err.end
|
2bec9e9563a7f4a4d206f630f7d8372fa7c56d89
| 23,826 |
from pathlib import Path
import paramiko
def run_on_host(con_info, command):
"""
Runs a command on a target pool of host defined in a hosts.yaml file.
"""
# Paramiko client configuration
paramiko.util.log_to_file(base + "prt_paramiko.log")
UseGSSAPI = (paramiko.GSS_AUTH_AVAILABLE)
DoGSSAPIKeyExchange = (paramiko.GSS_AUTH_AVAILABLE)
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.load_system_host_keys()
if not UseGSSAPI and not DoGSSAPIKeyExchange:
client.connect(
con_info[1]['IP'],
port=con_info[1]['PORT'],
username=con_info[1]['USER'],
key_filename=str(base + 'prt_rsa.key')
)
else:
client.connect(
con_info[1]['IP'],
port=con_info[1]['PORT'],
username=con_info[1]['USER'],
key_filename=str(Path.home()) + 'prt_rsa.key',
gss_auth=UseGSSAPI,
gss_kex=DoGSSAPIKeyExchange,
)
con_status = str('Connection Succeeded')
stdin, stdout, stderr = client.exec_command(command)
results_dict = {
'name': con_info[0],
'uname': con_info[1]['NAME'],
'status': con_status,
'stdout': [x.replace('\n', '') for x in stdout.readlines()],
'stderr': [x.replace('\n', '') for x in stderr.readlines()]
}
client.close()
except Exception as error:
con_status = str("Connection Failed : PRT Caught exception(%s: %s" % (error.__class__, error) + ')')
results_dict = {
'name': con_info[0],
'uname': con_info[1]['NAME'],
'status': con_status,
'stdout': [],
'stderr': []
}
try:
client.close()
except Exception:
pass
return results_dict
|
e3747daa1ea6e68ae4900bd596458bf756017c69
| 23,828 |
import colorsys
import hashlib
def uniqueColor(string):
"""
Returns a color from the string.
Same strings will return same colors, different strings will return different colors ('randomly' different)
Internal: string =md5(x)=> hex =x/maxhex=> float [0-1] =hsv_to_rgb(x,1,1)=> rgb =rgb_to_int=> int
:param string: input string
:return: int color
"""
return sum(round(c * 255) << d for c, d in zip(colorsys.hsv_to_rgb(int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16) / 2 ** 128, 1, 1), [16, 8, 0]))
|
0c895612c3bf2dd5f594a15daf6f2aa5d778eeb0
| 23,829 |
def _quoteattr(data, entities={}):
""" Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities['\n']=' '
entities['\r']=''
data = _escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
|
1f03a09e19d349458ec48b6041159e48ef93d97e
| 23,830 |
import http
import urllib
def external_login_confirm_email_get(auth, uid, token):
"""
View for email confirmation links when user first login through external identity provider.
HTTP Method: GET
When users click the confirm link, they are expected not to be logged in. If not, they will be logged out first and
redirected back to this view. After OSF verifies the link and performs all actions, they will be automatically
logged in through CAS and redirected back to this view again being authenticated.
:param auth: the auth context
:param uid: the user's primary key
:param token: the verification token
"""
user = User.load(uid)
if not user:
raise HTTPError(http.BAD_REQUEST)
destination = request.args.get('destination')
if not destination:
raise HTTPError(http.BAD_REQUEST)
# if user is already logged in
if auth and auth.user:
# if it is a wrong user
if auth.user._id != user._id:
return auth_logout(redirect_url=request.url)
# if it is the expected user
new = request.args.get('new', None)
if destination in campaigns.get_campaigns():
# external domain takes priority
campaign_url = campaigns.external_campaign_url_for(destination)
if not campaign_url:
campaign_url = campaigns.campaign_url_for(destination)
return redirect(campaign_url)
if new:
status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True)
return redirect(web_url_for('dashboard'))
# token is invalid
if token not in user.email_verifications:
raise HTTPError(http.BAD_REQUEST)
verification = user.email_verifications[token]
email = verification['email']
provider = verification['external_identity'].keys()[0]
provider_id = verification['external_identity'][provider].keys()[0]
# wrong provider
if provider not in user.external_identity:
raise HTTPError(http.BAD_REQUEST)
external_status = user.external_identity[provider][provider_id]
try:
ensure_external_identity_uniqueness(provider, provider_id, user)
except ValidationError as e:
raise HTTPError(http.FORBIDDEN, e.message)
if not user.is_registered:
user.register(email)
if email.lower() not in user.emails:
user.emails.append(email.lower())
user.date_last_logged_in = timezone.now()
user.external_identity[provider][provider_id] = 'VERIFIED'
user.social[provider.lower()] = provider_id
del user.email_verifications[token]
user.verification_key = generate_verification_key()
user.save()
service_url = request.url
if external_status == 'CREATE':
mails.send_mail(
to_addr=user.username,
mail=mails.WELCOME,
mimetype='html',
user=user
)
service_url += '&{}'.format(urllib.urlencode({'new': 'true'}))
elif external_status == 'LINK':
mails.send_mail(
user=user,
to_addr=user.username,
mail=mails.EXTERNAL_LOGIN_LINK_SUCCESS,
external_id_provider=provider,
)
# redirect to CAS and authenticate the user with the verification key
return redirect(cas.get_login_url(
service_url,
username=user.username,
verification_key=user.verification_key
))
|
18a92d289e63224b245e4e958efd6d5924495ce1
| 23,831 |
def date_since_epoch(date, unit='day'):
""" Get the date for the specified date in unit
:param date: the date in the specified unit
:type date: int
:param unit: one of 'year', 'month' 'week', 'day', 'hour', 'minute',
or 'second'
:return: the corresponding date
:rtype: ee.Date
"""
epoch = ee.Date(EE_EPOCH.isoformat())
return epoch.advance(date, unit)
|
f787170869ba081a2d321d0198d27948dc44ffa6
| 23,832 |
def eval_assoc(param_list, meta):
"""
Evaluate the association score between a given text and
a list of categories or statements.
Param 1 - string, the text in question
Param 2 - list of strings, the list of categories to associate Param 1 to
"""
data = {
'op': 'eval_assoc',
'text': param_list[0],
'cats': param_list[1]
}
return BART_API.post(data)['sorted_associations']
|
f49d5080d0b5f6a526be11b487bbbf17782d7197
| 23,833 |
def quiver3d(*args, **kwargs):
"""Wraps `mayavi.mlab.quiver3d`
Args:
*args: passed to `mayavi.mlab.quiver3d`
**kwargs: Other Arguments are popped, then kwargs is passed to
`mayavi.mlab.quiver3d`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
TYPE: Description
"""
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
quivers = mlab.quiver3d(*args, **kwargs)
apply_cmap(quivers, mode='scalar', **cmap_kwargs)
apply_cmap(quivers, mode='vector', **cmap_kwargs)
return quivers
|
8cfd494f0b801490372d94f7ab842c0b5cd19099
| 23,834 |
def get_instance_types(self):
"""
Documentation:
---
Description:
Return a sorted list of the EC2 instance types available via the client.
"""
instance_types = sorted([instance_type["InstanceType"] for instance_type in self.ec2_client.describe_instance_types()["InstanceTypes"]])
return instance_types
|
583311de8b2f23a967e40c8be5d140f6ab28244c
| 23,835 |
def ret_digraph_points(sed, digraph):
"""Finds the digraph points of the subject extracted data.
Parameters
----------
`sed` (object) "_subject","_track_code", "data": [{"digraph","points"}]
Returns
---------
(list) The points of the particular digraph found
"""
ret = [d['points'] for d in sed['data'] if d['digraph'] == digraph]
if ret == []:
pynocomm.send_to_node(
'**Warning: No digraph points found in ret_digraph_points, digraph:' + digraph)
_foo = 1
else:
ret = ret[0]
return ret
|
853dac3afdb542dbc2878340c8d75b8b4544c531
| 23,836 |
def _sizeof_fmt(num):
"""Format byte size to human-readable format.
https://web.archive.org/web/20111010015624/http://blogmag.net/blog/read/38/Print_human_readable_file_size
Args:
num (float): Number of bytes
"""
for x in ["bytes", "KB", "MB", "GB", "TB", "PB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
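# Hedged usage sketch (illustrative addition):
assert _sizeof_fmt(512) == "512.0 bytes"
assert _sizeof_fmt(123456789) == "117.7 MB"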
|
97c700954248a455592b3da9b274bfda69a7370f
| 23,837 |
from .. import sim
from typing import Dict
def gatherData(gatherLFP = True):
"""
Gathers cells, populations and simulation data from all nodes onto the master node
Parameters
----------
gatherLFP : bool
Whether to gather LFP data from all nodes
**Default:** ``True``
"""
sim.timing('start', 'gatherTime')
## Pack data from all hosts
if sim.rank==0:
print('\nGathering data...')
# flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate)
if not sim.cfg.saveCellSecs:
for cell in sim.net.cells:
cell.secs = None
cell.secLists = None
# flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate)
if not sim.cfg.saveCellConns:
for cell in sim.net.cells:
cell.conns = []
# Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include)
elif sim.cfg.compactConnFormat:
sim.compactConnFormat()
# remove data structures used to calculate LFP
if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj:
for cell in sim.net.compartCells:
try:
del cell.imembVec
del cell.imembPtr
del cell._segCoords
except:
pass
for pop in list(sim.net.pops.values()):
try:
del pop._morphSegCoords
except:
pass
simDataVecs = ['spkt', 'spkid', 'stims', 'dipole'] + list(sim.cfg.recordTraces.keys())
if sim.cfg.recordDipoles:
_aggregateDipoles()
simDataVecs.append('dipole')
singleNodeVecs = ['t']
if sim.nhosts > 1: # only gather if >1 nodes
netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()}
# gather only sim data
if getattr(sim.cfg, 'gatherOnlySimData', False):
nodeData = {'simData': sim.simData}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0: # simData
print(' Gathering only sim data...')
sim.allSimData = Dict()
for k in list(gather[0]['simData'].keys()): # initialize all keys of allSimData dict
if gatherLFP and k == 'LFP':
sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
elif sim.cfg.recordDipoles and k == 'dipole':
for dk in sim.cfg.recordDipoles:
sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk]))
else:
sim.allSimData[k] = {}
for key in singleNodeVecs: # store single node vectors (eg. 't')
sim.allSimData[key] = list(nodeData['simData'][key])
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
for key,val in node['simData'].items(): # update simData dics of dics of h.Vector
if key in simDataVecs: # simData dicts that contain Vectors
if isinstance(val, dict):
for key2,val2 in val.items():
if isinstance(val2,dict):
sim.allSimData[key].update(Dict({key2:Dict()}))
for stim,val3 in val2.items():
sim.allSimData[key][key2].update({stim:list(val3)}) # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
#elif key == 'dipole':
# sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node
else:
sim.allSimData[key].update({key2:list(val2)}) # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
else:
sim.allSimData[key] = list(sim.allSimData[key])+list(val) # update simData dicts which are Vectors
elif gatherLFP and key == 'LFP':
sim.allSimData[key] += np.array(val)
elif key not in singleNodeVecs:
sim.allSimData[key].update(val) # update simData dicts which are not Vectors
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
sim.net.allPops = ODict() # pops
for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
sim.net.allCells = [c.__dict__ for c in sim.net.cells]
# gather cells, pops and sim data
else:
nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
#print data
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0:
allCells = []
allPops = ODict()
for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids}
sim.allSimData = Dict()
for k in list(gather[0]['simData'].keys()): # initialize all keys of allSimData dict
if gatherLFP and k == 'LFP':
sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
elif sim.cfg.recordDipoles and k == 'dipole':
for dk in sim.cfg.recordDipoles:
sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk]))
else:
sim.allSimData[k] = {}
for key in singleNodeVecs: # store single node vectors (eg. 't')
sim.allSimData[key] = list(nodeData['simData'][key])
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
allCells.extend(node['netCells']) # extend allCells list
for popLabel,popCellGids in node['netPopsCellGids'].items():
allPopsCellGids[popLabel].extend(popCellGids)
for key,val in node['simData'].items(): # update simData dics of dics of h.Vector
if key in simDataVecs: # simData dicts that contain Vectors
if isinstance(val,dict):
for key2,val2 in val.items():
if isinstance(val2,dict):
sim.allSimData[key].update(Dict({key2:Dict()}))
for stim,val3 in val2.items():
sim.allSimData[key][key2].update({stim:list(val3)}) # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
#elif key == 'dipole':
# sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node
else:
sim.allSimData[key].update({key2:list(val2)}) # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
else:
sim.allSimData[key] = list(sim.allSimData[key])+list(val) # update simData dicts which are Vectors
elif gatherLFP and key == 'LFP':
sim.allSimData[key] += np.array(val)
elif key not in singleNodeVecs:
sim.allSimData[key].update(val) # update simData dicts which are not Vectors
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])
for popLabel,pop in allPops.items():
pop['cellGids'] = sorted(allPopsCellGids[popLabel])
sim.net.allPops = allPops
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
else: # if single node, save data in same format as for multiple nodes for consistency
if sim.cfg.createNEURONObj:
sim.net.allCells = [Dict(c.__getstate__()) for c in sim.net.cells]
else:
sim.net.allCells = [c.__dict__ for c in sim.net.cells]
sim.net.allPops = ODict()
for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
sim.allSimData = Dict()
for k in list(sim.simData.keys()): # initialize all keys of allSimData dict
sim.allSimData[k] = Dict()
for key,val in sim.simData.items(): # update simData dics of dics of h.Vector
if key in simDataVecs+singleNodeVecs: # simData dicts that contain Vectors
if isinstance(val,dict):
for cell,val2 in val.items():
if isinstance(val2,dict):
sim.allSimData[key].update(Dict({cell:Dict()}))
for stim,val3 in val2.items():
sim.allSimData[key][cell].update({stim:list(val3)}) # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
else:
sim.allSimData[key].update({cell:list(val2)}) # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
else:
sim.allSimData[key] = list(sim.allSimData[key])+list(val) # update simData dicts which are Vectors
else:
sim.allSimData[key] = val # update simData dicts which are not Vectors
## Print statistics
sim.pc.barrier()
if sim.rank == 0:
sim.timing('stop', 'gatherTime')
if sim.cfg.timing: print((' Done; gather time = %0.2f s.' % sim.timingData['gatherTime']))
print('\nAnalyzing...')
sim.totalSpikes = len(sim.allSimData['spkt'])
sim.totalSynapses = sum([len(cell['conns']) for cell in sim.net.allCells])
if sim.cfg.createPyStruct:
if sim.cfg.compactConnFormat:
preGidIndex = sim.cfg.compactConnFormat.index('preGid') if 'preGid' in sim.cfg.compactConnFormat else 0
sim.totalConnections = sum([len(set([conn[preGidIndex] for conn in cell['conns']])) for cell in sim.net.allCells])
else:
sim.totalConnections = sum([len(set([conn['preGid'] for conn in cell['conns']])) for cell in sim.net.allCells])
else:
sim.totalConnections = sim.totalSynapses
sim.numCells = len(sim.net.allCells)
if sim.totalSpikes > 0:
sim.firingRate = float(sim.totalSpikes)/sim.numCells/sim.cfg.duration*1e3 # Calculate firing rate
else:
sim.firingRate = 0
if sim.numCells > 0:
sim.connsPerCell = sim.totalConnections/float(sim.numCells) # Calculate the number of connections per cell
sim.synsPerCell = sim.totalSynapses/float(sim.numCells) # Calculate the number of connections per cell
else:
sim.connsPerCell = 0
sim.synsPerCell = 0
print((' Cells: %i' % (sim.numCells) ))
print((' Connections: %i (%0.2f per cell)' % (sim.totalConnections, sim.connsPerCell)))
if sim.totalSynapses != sim.totalConnections:
print((' Synaptic contacts: %i (%0.2f per cell)' % (sim.totalSynapses, sim.synsPerCell)))
if 'runTime' in sim.timingData:
print((' Spikes: %i (%0.2f Hz)' % (sim.totalSpikes, sim.firingRate)))
print((' Simulated time: %0.1f s; %i workers' % (sim.cfg.duration/1e3, sim.nhosts)))
print((' Run time: %0.2f s' % (sim.timingData['runTime'])))
if sim.cfg.printPopAvgRates and not sim.cfg.gatherOnlySimData:
trange = sim.cfg.printPopAvgRates if isinstance(sim.cfg.printPopAvgRates, list) else None
sim.allSimData['popRates'] = sim.analysis.popAvgRates(tranges=trange)
if 'plotfI' in sim.cfg.analysis:
sim.analysis.calculatefI() # need to call here so data is saved to file
sim.allSimData['avgRate'] = sim.firingRate # save firing rate
return sim.allSimData
|
a40b61088aaedbb8f866014f933671b2264a2031
| 23,838 |
def stations_by_distance(stations, p):
"""For a list of stations (MonitoringStation object) and
coordinate p (latitude, longitude), returns list of tuples
(station, distance) sorted by the distance from the given
coordinate p"""
# Create the list of (stations, distance) tuples
station_dist = []
# Append data to the list
for station in stations:
station_dist.append((station, haversine(p, station.coord)))
# Return station_dist list sorted by the distance from p
return sorted_by_key(station_dist, 1)
|
098e692c2ec18b7c15cebf84043eaca768566075
| 23,839 |
from typing import TextIO
from typing import Optional
from typing import Dict
from collections import defaultdict
from statistics import mean  # assumed source of `mean`
import csv
def process(fh: TextIO, headers: Optional[Dict[str, str]],
writer: csv.DictWriter, args: Args) -> int:
"""
Process the file into Mongo (client)
First 5 columns are: STREAM, DATE, STATION, REP, #GRIDS
Columns after that are the measurements
"""
reader = csv.DictReader(fh, delimiter=',')
flds = reader.fieldnames
values = defaultdict(list) # to average replicates
# Parse file into values for each variable, station, and date
for i, row in enumerate(reader, start=1):
# Base record has station/date
station = get_station(row.get('STATION', ''))
date = get_date(row.get('DATE', ''))
if not all([date, station]):
continue
for fld in filter(lambda f: f != '', flds[5:]):
raw_val = row[fld].strip()
if raw_val == '':
continue
# Remove leading "="?
if raw_val.startswith('='):
raw_val = raw_val[1:]
# Try to convert value to float
val = None
try:
val = float(raw_val)
except Exception:
continue
if val is not None:
values[(fld, station, date)].append(val)
# Write the averages for each variable, station, and date
num_written = 0
for key, replicates in values.items():
fld, station, date = key
# Maybe convert "ACENTR" -> "Ephemeroptera Baetidae Acentrella spp."
variable = headers.get(fld.upper(), fld) if headers else fld
# Take the average of the values
val = mean(replicates)
print(f'{fld} {station} {date} => {val}')
writer.writerow({
'source': args.source,
'unit': '',
'location_name': station,
'location_type': 'station',
'variable_name': fld,
'variable_desc': variable,
'collected_on': date,
'value': val,
'medium': args.medium
})
num_written += 1
return num_written
|
40a68091d65e1f9a56ca150703aaaa207ef438a2
| 23,840 |
def dipole_moment_programs():
""" Constructs a list of program modules implementing
static dipole moment output readers.
"""
return pm.program_modules_with_function(pm.Job.DIP_MOM)
|
a225997a445451411819ebfb8c7bf14629ee3742
| 23,841 |
def kappa(a, b, c, d):
""" GO term 2
| yes | no |
-------------------------------
GO | yes | a | b |
term1 | no | c | d |
    kappa(GO_1, GO_2) = 1 - (1 - po) / (1 - pe)
po = (a + d) / (a + b + c + d)
marginal_a = ( (a + b) * ( a + c )) / (a + b + c + d)
marginal_b = ( (c + d) * ( b + d )) / (a + b + c + d)
pe = (marginal_a + marginal_b) / (a + b + c + d)
"""
a = float(len(a))
b = float(len(b))
c = float(len(c))
d = float(len(d))
po = (a + d) / (a + b + c + d)
marginal_a = ( (a + b) * ( a + c )) / (a + b + c + d)
marginal_b = ( (c + d) * ( b + d )) / (a + b + c + d)
pe = (marginal_a + marginal_b) / (a + b + c + d)
#print (f" {a} | {b}\n {c} | {d}")
return 1 - (1 - po) / (1 - pe)
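# Hedged worked example: a/b/c/d are collections whose sizes fill the 2x2
# agreement table (plain ranges stand in for hypothetical GO annotation sets).
a_demo, b_demo, c_demo, d_demo = range(40), range(10), range(5), range(45)
# po = (40 + 45) / 100 = 0.85 and pe = (22.5 + 27.5) / 100 = 0.5
print(round(kappa(a_demo, b_demo, c_demo, d_demo), 3))  # 0.7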
|
5884a6745f6a93b044eabb1bfe38834cb59366d4
| 23,842 |
import functools
import torch
def test_CreativeProject_integration_ask_tell_ask_works(covars, model_type, train_X, train_Y,
covars_proposed_iter, covars_sampled_iter,
response_sampled_iter, monkeypatch):
"""
test that both surrogate model and acquisition functions are added and updated following two rounds of ask-tell.
Monkeypatch "_read_covars_manual_input" and "_read_response_manual_input" from ._observe.py to circumvent manual
input via builtins.input. This automatically tests the new functionality of random start by starting from no data
(train_X, train_Y)
"""
# initialize the class
# random_start = True is default, so this tests random start
cc = TuneSession(covars=covars, model=model_type)
# set attributes on class (to simulate previous iterations of ask/tell functionality)
cc.train_X = train_X
cc.proposed_X = train_X
cc.train_Y = train_Y
cc.model["covars_proposed_iter"] = covars_proposed_iter
cc.model["covars_sampled_iter"] = covars_sampled_iter
cc.model["response_sampled_iter"] = response_sampled_iter
# define decorator to add 1.0 to all entries in monkeypatched returned data. This to be able to tell that the last
# entry (from second "tell") is different than the first, and know that it has been overwritten
def add_one(func):
@functools.wraps(func)
def wrapper_add_one(*args, **kwargs):
wrapper_add_one.num_calls += 1
output = func(*args, **kwargs)
return output + wrapper_add_one.num_calls
wrapper_add_one.num_calls = 0
return wrapper_add_one
# monkeypatch "_read_covars_manual_input"
candidate_tensor = torch.tensor([[tmp[0] for tmp in covars]], dtype=torch.double)
@add_one
def mock_read_covars_manual_input(additional_text):
return candidate_tensor
monkeypatch.setattr(cc, "_read_covars_manual_input", mock_read_covars_manual_input)
# monkeypatch "_read_response_manual_input"
resp_tensor = torch.tensor([[12]], dtype=torch.double)
@add_one
def mock_read_response_manual_input(additional_text):
return resp_tensor
monkeypatch.setattr(cc, "_read_response_manual_input", mock_read_response_manual_input)
# run the ask method
cc.ask()
# run the tell method
cc.tell()
# test that data is added to pretty formats
assert cc.x_data.shape[0] == 1
for i in range(candidate_tensor.size()[1]):
col = cc.x_data.columns[i]
assert cc.x_data[col].iloc[-1] == candidate_tensor[0, i].item() + 1
assert cc.y_data.shape[0] == 1
assert cc.y_data["Response"].iloc[-1] == resp_tensor[0, 0].item() + 1
# grab the model state
surrogate_model1 = cc.model["model"]
# run the ask method AGAIN
cc.ask()
# grab the acquisition function
acq_func1 = cc.acq_func["object"]
# run the tell method AGAIN
cc.tell()
# test that new rows are added to pretty format data
print(candidate_tensor)
print(cc.x_data)
assert cc.x_data.shape[0] == 2
for i in range(candidate_tensor.size()[1]):
col = cc.x_data.columns[i]
assert cc.x_data[col].iloc[-1] == candidate_tensor[0, i].item() + 2
assert cc.y_data.shape[0] == 2
assert cc.y_data["Response"].iloc[-1] == resp_tensor[0, 0].item() + 2
# grab the model state
surrogate_model2 = cc.model["model"]
# run the ask method a THIRD TIME
cc.ask()
# grab the acquisition function
acq_func2 = cc.acq_func["object"]
# assert that both model and acquisition functions exist
assert cc.model["model"] is not None
assert cc.acq_func["object"] is not None
# assert that surrogate model has updated
assert surrogate_model1 != surrogate_model2
# assert that acquisition function has updated
assert acq_func1 != acq_func2
|
98d665e85b19acf956026848614d9b49e204afb8
| 23,843 |
def ferret_init(id):
"""
Initialization for the stats_chisquare Ferret PyEF
"""
axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
axes_values[0] = pyferret.AXIS_CUSTOM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 3,
"descript": "Returns chi-square test stat. and prob. (and num. good categories, N) " \
"that sample counts of cat. data matches pop. expected counts. ",
"axes": axes_values,
"argnames": ( "SAMPLE_CNTS", "EXPECT_CNTS", "DELTA_DEGFREE", ),
"argdescripts": ( "Sample counts of categorical data",
"Expected counts or relative frequencies (will be adjusted)",
"Difference from standard (N-1) degrees of freedom (num. computed parameters)", ),
"argtypes": ( pyferret.FLOAT_ARRAY, pyferret.FLOAT_ARRAY, pyferret.FLOAT_ONEVAL, ),
"influences": ( false_influences, false_influences, false_influences, ),
}
return retdict
|
5231fec470d8c968334d6f72766cc1d0ad9ae61c
| 23,844 |
def has(key):
"""Checks if the current context contains the given key."""
    return key in Context.currentContext.values
|
0c6c46812e97c9d38d101dcc06346b329a3cd81a
| 23,845 |
def VGG_16(weights_path=None):
"""
    Creates a convolutional Keras neural network with the VGG-16 architecture
    and trains it on CT-scan data from both datasets.
----
Returns the model
"""
X_train, Y_train = loadfromh5(1, 2, 19)
X_train1, Y_train1 = loadfromh5(2, 2, 19)
X_train.extend(X_train1)
Y_train.extend(Y_train1)
X_train = np.asarray(X_train).reshape(np.asarray(X_train).shape[0], 64, 64, 1)
# X_train = np.transpose(X_train, (0,3,1,2))
print(X_train.shape)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(64, 64, 1)))
model.add(Convolution2D(64, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="sgd", metrics=["mse"])
K.set_value(model.optimizer.learning_rate, 0.001)
model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)
if weights_path:
model.load_weights(weights_path)
return model
|
640aa7480afac0c8c4b71f3045f702888672172f
| 23,846 |
from scipy import optimize
def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'):
"""Inverse of the davies upper-bound"""
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_davies(z, *args) - p
res = optimize.root(func, z0, args=args, method='lm')
if not res.success:
        raise ValueError('inv_fap_davies did not converge for p={0}'.format(p))
return res.x
|
9b6f8a82ca25235785d5fe83a5c671130ddd509b
| 23,847 |
def public(request):
"""browse public repos. Login not required"""
username = request.user.get_username()
public_repos = DataHubManager.list_public_repos()
# This should really go through the api... like everything else
# in this file.
public_repos = serializers.serialize('json', public_repos)
return render_to_response("public-browse.html", {
'login': username,
'repo_base': 'repo_base',
'repos': [],
'public_repos': public_repos})
|
0d44053c6db872032b65b4786c5771dbecad946a
| 23,848 |
def get_boundaries_old(im,su=5,sl=5,valley=5,cutoff_max=1.,plt_val=False):
"""Bintu et al 2018 candidate boundary calling"""
im_=np.array(im)
ratio,ration,center,centern=[],[],[],[]
for i in range(len(im)):
x_im_l,y_im_l = [],[]
x_im_r,y_im_r = [],[]
xn_im_l,yn_im_l = [],[]
xn_im_r,yn_im_r = [],[]
for j in range(sl):
xn_im_l.extend(i+j-np.arange(su)-1)
yn_im_l.extend([i+j]*su)
xn_im_r.extend(i+j+sl-np.arange(su)-1)
yn_im_r.extend([i+j+sl]*su)
x_im_l.extend(i+j+np.arange(su)+1)
y_im_l.extend([i+j]*su)
x_im_r.extend(i+j+sl+np.arange(su)+1)
y_im_r.extend([i+j+sl]*su)
x_im_l,y_im_l,x_im_r,y_im_r = list(map(np.array,[x_im_l,y_im_l,x_im_r,y_im_r]))
xn_im_l,yn_im_l,xn_im_r,yn_im_r = list(map(np.array,[xn_im_l,yn_im_l,xn_im_r,yn_im_r]))
in_image = np.all(x_im_l>=0) and np.all(x_im_r>=0) and np.all(y_im_l>=0) and np.all(y_im_r>=0)
in_image = in_image and np.all(x_im_l<len(im)) and np.all(x_im_r<len(im)) and np.all(y_im_l<len(im)) and np.all(y_im_r<len(im))
in_imagen = np.all(xn_im_l>=0) and np.all(xn_im_r>=0) and np.all(yn_im_l>=0) and np.all(yn_im_r>=0)
in_imagen = in_imagen and np.all(xn_im_l<len(im)) and np.all(xn_im_r<len(im)) and np.all(yn_im_l<len(im)) and np.all(yn_im_r<len(im))
if in_image:
val_l,val_r = np.nanmean(im_[x_im_l,y_im_l]),np.nanmean(im_[x_im_r,y_im_r])
ratio.append(val_l/val_r)
center.append(i+sl)
if in_imagen:
val_l,val_r = np.nanmean(im_[xn_im_l,yn_im_l]),np.nanmean(im_[xn_im_r,yn_im_r])
ration.append(val_r/val_l)
centern.append(i+sl)
if False:#i==9:
plt.figure(figsize=(20,20))
plt.plot(xn_im_l,yn_im_l,'mo')
plt.plot(xn_im_r,yn_im_r,'go')
plt.plot(x_im_l,y_im_l,'ro')
plt.plot(x_im_r,y_im_r,'bo')
plt.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000)
plt.show()
print(x_im_l,y_im_l,x_im_r,y_im_r)
center,ratio=np.array(center),np.array(ratio)
centern,ration=np.array(centern),np.array(ration)
max_ratio = np.zeros(len(im))+np.nan
max_ratio[center]=ratio
max_ratio[centern]=np.nanmax([max_ratio[centern],ration],axis=0)
local_max_good = get_ind_loc_max(max_ratio,cutoff_max=cutoff_max,valley=valley)
#local_max_goodn = get_ind_loc_max(ration,cutoff_max=cutoff_max,valley=valley)
###Plotting
if plt_val:
#plt.close('all')
plt.figure(figsize=(12,7))
#print local_max_good,local_max_goodn
plt.plot(center,np.log(ratio),'o-')
plt.plot(centern,np.log(ration),'o-')
plt.plot(np.log(max_ratio),'k-')
if len(local_max_good)>0:
plt.plot(local_max_good,np.log(max_ratio[local_max_good]),'o')
plt.show()
fig, ax = plt.subplots(figsize=(12,7))
if len(local_max_good)>0:
ax.plot(local_max_good[:],local_max_good[:],'go',ms=10,mec='k',mew=2)
#cax = ax.imshow(set_diag(img,np.nanmax(img)),interpolation='nearest',cmap='bwr')#,vmax=1000,vmin=0)
cax = ax.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000,vmin=0)
cbar = fig.colorbar(cax)
plt.show()
return local_max_good,max_ratio[local_max_good]
|
620fa37306f85a06d88b4f08fd84e9873866e011
| 23,849 |
def zfsr32(val, n):
"""zero fill shift right for 32 bit integers"""
return (val >> n) if val >= 0 else ((val + 4294967296) >> n)
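# Quick check of the zero-fill (logical) right shift on 32-bit values:
# negative inputs are first mapped to their unsigned 32-bit representation.
print(zfsr32(-1, 28))           # 15, i.e. 0xFFFFFFFF >> 28
print(zfsr32(-2147483648, 31))  # 1, the sign bit shifted down
print(zfsr32(16, 2))            # 4, positive values behave like a plain >>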
|
4b890caa0b7b086e923e7b229e5551fd66d24016
| 23,850 |
def n_optimize_fn(step: int) -> int:
"""`n_optimize` scheduling function."""
if step <= FLAGS.change_n_optimize_at:
return FLAGS.n_optimize_1
else:
return FLAGS.n_optimize_2
|
2d8b05a19c05a662119e51ace97817246c38ebe3
| 23,851 |
import torch
def compute_receptive_field(model, img_size=(1, 3, 3)):
"""Computes the receptive field for a model.
The receptive field is computed using the magnitude of the gradient of the
model's output with respect to the input.
Args:
      model: Model for which to compute the receptive field. Assumes NCHW input.
img_size: The (channels, height, width) of the input to the model.
"""
c, h, w = img_size
img = torch.randn((1, c, h, w), requires_grad=True)
model(img)[0, 0, h // 2, w // 2].mean().backward()
grad = img.grad.abs()[0, 0, :, :]
return torch.where(grad > 0, torch.ones_like(grad), torch.zeros_like(grad))
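# Hedged usage sketch: two stacked 3x3 convolutions (stride 1) should give a
# 5x5 receptive field at the output centre, so the mask is expected to sum to 25.
import torch.nn as nn
demo_model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.Conv2d(8, 1, kernel_size=3, padding=1),
)
mask = compute_receptive_field(demo_model, img_size=(3, 9, 9))
print(int(mask.sum()))  # expected: 25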
|
bdc3065e696bf221698d1abdb0717b7da957ca84
| 23,852 |
def flippv(pv, n):
"""Flips the meaning of an index partition vector.
Parameters
----------
pv : ndarray
The index partition to flip.
n : integer
The length of the dimension to partition.
Returns
-------
notpv : ndarray
The complement of pv.
Example:
>>> import numpy as np
>>> import locate
>>> pv = np.array([0,3,5])
>>> locate.flippv(pv,8)
array([1, 2, 4, 6, 7])
"""
tf = np.ones(n, dtype=bool)
tf[pv] = False
return tf.nonzero()[0]
|
6c063169100eba098460cd12339f7a6266ea01f0
| 23,853 |
from typing import Union
from pathlib import Path
def is_dir(path: Union[str, Path]) -> bool:
"""Check if the given path is a directory
:param path: path to be checked
"""
if isinstance(path, str):
path = Path(path)
if path.exists():
return path.is_dir()
else:
return str(path).endswith("/")
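# Hedged examples: an existing path is resolved via Path.is_dir(); a path that
# does not exist falls back to the trailing-slash heuristic.
print(is_dir('/tmp'))              # True on most Unix-like systems
print(is_dir('missing/dir/'))      # True (trailing slash, path does not exist)
print(is_dir('missing/file.txt'))  # False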
|
540cce7f5c6a25186427ba71b94aa090c2ab90a7
| 23,854 |
from itertools import cycle
def _replace_dendro_colours(
colours,
above_threshold_colour="C0",
non_cluster_colour="black",
colorscale=None
):
""" Returns colorscale used for dendrogram tree clusters.
Keyword arguments:
colorscale -- Colors to use for the plot in rgb format.
Should have 8 colours.
"""
if isinstance(colorscale, str):
colorscale = _mpl_cmap_to_str(colorscale)
elif colorscale is None:
colorscale = [
'rgb(0,116,217)', # instead of blue
'rgb(35,205,205)', # cyan
'rgb(61,153,112)', # green
'rgb(40,35,35)', # black
'rgb(133,20,75)', # magenta
'rgb(255,65,54)', # red
'rgb(255,255,255)', # white
'rgb(255,220,0)', # yellow
]
else:
assert isinstance(colorscale, (list, tuple)), \
"colorscale must be a list or tuple of strings"
assert all(isinstance(c, str) for c in colorscale), \
"colorscale must be a list or tuple of strings"
original_colours = set(colours)
original_colours.remove(above_threshold_colour)
colour_map = dict(zip(original_colours, cycle(colorscale)))
colour_map[above_threshold_colour] = non_cluster_colour
return [colour_map[c] for c in colours]
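# Hedged example: cluster colours 'C1'/'C2' are remapped onto the supplied
# colorscale while the above-threshold colour 'C0' becomes the non_cluster_colour.
demo_colours = ['C1', 'C1', 'C0', 'C2']
print(_replace_dendro_colours(demo_colours, colorscale=['red', 'green']))
# e.g. ['red', 'red', 'black', 'green'] (cluster-to-colorscale pairing may vary)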
|
ce50b8c061bb908d8670b059261cde83e7dd62b1
| 23,856 |
import re
def document_to_vector(lemmatized_document, uniques):
"""
Converts a lemmatized document to a bow vector
representation.
1/0 for word exists/doesn't exist
"""
#print(uniques)
# tokenize
words = re.findall(r'\w+', lemmatized_document.lower())
    vector = [0]*len(uniques)
    # vector[i] is 1 if uniques[i] appears in the document, else 0
    for i in range(len(uniques)):
        for j in range(len(words)):
            if uniques[i] == words[j]:
                vector[i] = 1
                break  # word found; stop scanning the rest of the document
return vector
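# Hedged usage example: `uniques` is an assumed precomputed vocabulary list.
vocab = ['cat', 'dog', 'fish']
print(document_to_vector('The cat chased the dog', vocab))  # [1, 1, 0]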
|
e4b108b8e99a827788d7eff5d4eabf71021d6e21
| 23,857 |
def ubcOcTree(FileName_Mesh, FileName_Model, pdo=None):
"""
Description
-----------
    Wrapper to read UBC GIF OcTree mesh and model file pairs. UBC OcTree models are defined using a 2-file format. The "mesh" file describes how the data is discretized. The "model" file lists the physical property values for all cells in a mesh. A model file is meaningless without an associated mesh file. This only handles OcTree formats
Parameters
----------
`FileName_Mesh` : str
- The OcTree Mesh filename as an absolute path for the input mesh file in UBC OcTree Mesh Format
`FileName_Model` : str
- The model filename as an absolute path for the input model file in UBC OcTree Model Format.
`pdo` : vtk.vtkUnstructuredGrid, optional
- The output data object
Returns
-------
Returns a vtkUnstructuredGrid generated from the UBC 2D/3D Mesh grid. Mesh is defined by the input mesh file. Cell data is defined by the input model file.
"""
# Construct/read the mesh
mesh = ubcOcTreeMesh(FileName_Mesh, pdo=pdo)
# Read the model data
# - read model file for OcTree format
if FileName_Model is not None:
model = ubcModel3D(FileName_Model)
# Place the model data onto the mesh
mesh = placeModelOnOcTreeMesh(mesh, model)
return mesh
|
f9d71c7beebc8ca5f3c45fc24a4fd6ccae607634
| 23,858 |
def load_colormaps():
"""Return the provided colormaps."""
return load_builtin_data('colormaps')
|
00b0d73e127cbbf11b76d5a2281493af95337008
| 23,859 |
def discriminator(hr_images, scope, dim):
"""
Discriminator
"""
conv_lrelu = partial(conv, activation_fn=lrelu)
def _combine(x, newdim, name, z=None):
x = conv_lrelu(x, newdim, 1, 1, name)
y = x if z is None else tf.concat([x, z], axis=-1)
return minibatch_stddev_layer(y)
def _conv_downsample(x, dim, ksize, name):
y = conv2d_downscale2d(x, dim, ksize, name=name)
y = lrelu(y)
return y
with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
with tf.compat.v1.variable_scope("res_4x"):
net = _combine(hr_images[1], newdim=dim, name="from_input")
net = conv_lrelu(net, dim, 3, 1, "conv1")
net = conv_lrelu(net, dim, 3, 1, "conv2")
net = conv_lrelu(net, dim, 3, 1, "conv3")
net = _conv_downsample(net, dim, 3, "conv_down")
with tf.compat.v1.variable_scope("res_2x"):
net = _combine(hr_images[2], newdim=dim, name="from_input", z=net)
dim *= 2
net = conv_lrelu(net, dim, 3, 1, "conv1")
net = conv_lrelu(net, dim, 3, 1, "conv2")
net = conv_lrelu(net, dim, 3, 1, "conv3")
net = _conv_downsample(net, dim, 3, "conv_down")
with tf.compat.v1.variable_scope("res_1x"):
net = _combine(hr_images[4], newdim=dim, name="from_input", z=net)
dim *= 2
net = conv_lrelu(net, dim, 3, 1, "conv")
net = _conv_downsample(net, dim, 3, "conv_down")
with tf.compat.v1.variable_scope("bn"):
dim *= 2
net = conv_lrelu(net, dim, 3, 1, "conv1")
net = _conv_downsample(net, dim, 3, "conv_down1")
net = minibatch_stddev_layer(net)
# dense
dim *= 2
net = conv_lrelu(net, dim, 1, 1, "dense1")
net = conv(net, 1, 1, 1, "dense2")
net = tf.reduce_mean(net, axis=[1, 2])
return net
|
9af149750aed5febd17ab37b1e816356d2e27a40
| 23,860 |
def addchallenges(request) :
"""
    Administrator adds a new challenge.
"""
if request.user.is_superuser :
if request.method == 'POST' :
success = 0
form = forms.AddChallengeForm(request.POST, request.FILES)
if form.is_valid() :
success = 1
print(request.FILES)
if request.FILES :
i = models.Challenges(file=request.FILES['file'],
name=request.POST['name'],
category=request.POST['category'],
description=request.POST['description'],
points=request.POST['points'],
challenge_id=assignID(request.POST['name']),
flag=request.POST['flag'],
author=request.POST['author'])
i.save()
else :
i = models.Challenges(
name=request.POST['name'],
category=request.POST['category'],
description=request.POST['description'],
points=request.POST['points'],
challenge_id=assignID(request.POST['name']),
flag=request.POST['flag'],
author=request.POST['author'])
i.save()
return render(request, 'addchallenges.html', {'form':form,'success':success})
else :
form = forms.AddChallengeForm()
return render(request, 'addchallenges.html', {'form':form})
else :
return redirect("/")
|
e7aaa3a8418f66322f050dee74d74fd1d71bc0c9
| 23,861 |
def _decompose_ridge(Xtrain, alphas, n_alphas_batch=None, method="svd",
negative_eigenvalues="zeros"):
"""Precompute resolution matrices for ridge predictions.
To compute the prediction::
Ytest_hat = Xtest @ (XTX + alphas * Id)^-1 @ Xtrain^T @ Ytrain
where XTX = Xtrain^T @ Xtrain,
this function precomputes::
matrices = (XTX + alphas * Id)^-1 @ Xtrain^T.
Parameters
----------
Xtrain : array of shape (n_samples_train, n_features)
Concatenated input features.
alphas : float, or array of shape (n_alphas, )
Range of ridge regularization parameter.
n_alphas_batch : int or None
If not None, returns a generator over batches of alphas.
method : str in {"svd"}
        Method used to decompose the feature matrix Xtrain.
negative_eigenvalues : str in {"nan", "error", "zeros"}
If the decomposition leads to negative eigenvalues (wrongly emerging
from float32 errors):
- "error" raises an error.
- "zeros" remplaces them with zeros.
- "nan" returns nans if the regularization does not compensate
twice the smallest negative value, else it ignores the problem.
Returns
-------
matrices : array of shape (n_alphas, n_samples_test, n_samples_train) or \
(n_alphas, n_features, n_samples_train) if test is not None
Precomputed resolution matrices.
alpha_batch : slice
Slice of the batch of alphas.
"""
backend = get_backend()
use_alpha_batch = n_alphas_batch is not None
if n_alphas_batch is None:
n_alphas_batch = len(alphas)
if method == "svd":
# SVD: X = U @ np.diag(eigenvalues) @ Vt
U, eigenvalues, Vt = backend.svd(Xtrain, full_matrices=False)
else:
raise ValueError("Unknown method=%r." % (method, ))
for start in range(0, len(alphas), n_alphas_batch):
batch = slice(start, start + n_alphas_batch)
ev_weighting = eigenvalues / (alphas[batch, None] + eigenvalues ** 2)
# negative eigenvalues can emerge from incorrect kernels,
# or from float32
if eigenvalues[0] < 0:
if negative_eigenvalues == "nan":
ev_weighting[alphas[batch] < -eigenvalues[0] * 2, :] = \
backend.asarray(backend.nan, type=ev_weighting.dtype)
elif negative_eigenvalues == "zeros":
eigenvalues[eigenvalues < 0] = 0
elif negative_eigenvalues == "error":
raise RuntimeError(
"Negative eigenvalues. Make sure the kernel is positive "
"semi-definite, increase the regularization alpha, or use"
"another solver.")
else:
raise ValueError("Unknown negative_eigenvalues=%r." %
(negative_eigenvalues, ))
matrices = backend.matmul(Vt.T, ev_weighting[:, :, None] * U.T)
if use_alpha_batch:
yield matrices, batch
else:
return matrices, batch
del matrices
|
ba7d466546f4d417f9f455aee5fa0cccdaba968c
| 23,862 |
import requests
def get_new_listing(old_listing):
"""Get the new listing."""
try:
fetched_listing = requests.get(cfg['api_url']).json()['product']
except requests.exceptions.RequestException:
return old_listing
else:
old_item_ids = {old_item['productId'] for old_item in old_listing}
new_listing = [fetched_item for fetched_item in fetched_listing if
fetched_item['productId'] not in old_item_ids]
if new_listing:
save_listing(new_listing)
return new_listing
|
f8aa02d1a804ef5cfbb1192da15091d8e8816d16
| 23,863 |
from typing import Union
import logging
def create_user(engine: create_engine, data: dict) -> Union[User, None]:
"""
    Function for creating a row in the database
:param engine: sqlmodel's engine
:param data: dictionary with data that represents user
:return: Created user instance or nothing
"""
logging.info('Creating an user')
user = User(**data)
with Session(engine) as session:
try:
session.add(user)
session.commit()
session.refresh(user)
logging.info('User was created')
except exc.CompileError:
logging.warning('User was not created')
return None
return user
|
bb1dff7aca37a8a1eab9104d0b6cd27cb55f78da
| 23,864 |
import numpy as np
def _densify_2D(a, fact=2):
"""Densify a 2D array using np.interp.
:fact - the factor to density the line segments by
:Notes
:-----
:original construction of c rather than the zero's approach
: c0 = c0.reshape(n, -1)
: c1 = c1.reshape(n, -1)
: c = np.concatenate((c0, c1), 1)
"""
# Y = a changed all the y's to a
a = np.squeeze(a)
n_fact = len(a) * fact
b = np.arange(0, n_fact, fact)
b_new = np.arange(n_fact - 1) # Where you want to interpolate
c0 = np.interp(b_new, b, a[:, 0])
c1 = np.interp(b_new, b, a[:, 1])
n = c0.shape[0]
c = np.zeros((n, 2))
c[:, 0] = c0
c[:, 1] = c1
return c
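# Hedged example: densify a short polyline by a factor of 2; the output keeps
# the original vertices and interpolates one extra point between each pair.
line = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
dense = _densify_2D(line, fact=2)
print(dense.shape)  # (5, 2)
print(dense[:, 1])  # [0.  0.5 1.  0.5 0. ]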
|
e9a881f014c9ebcae6f3550c3b0c4d7beb576203
| 23,865 |
from typing import Union
def get_neighbor_edge(
graph: srf.Alignment,
edge: tuple[int, int],
column: str = 'z',
direction: str = 'up',
window: Union[None, int] = None,
statistic: str = 'min'
) -> Union[None, tuple[int, int]]:
"""Return the neighboring edge having the lowest minimum value
Parameters:
graph: directed network graph
edge: edge for which to determine a neighbor
Other Parameters:
column: column to test in vertices
direction: 'up' tests predecessor edges; 'down' tests successors
window: number of neighbor vertices to test
statistic: test statistic
Returns:
edge meeting the criteria
"""
vertices = graph.vertices
result = None
val = None
if direction == 'up':
neighbors = [(i, edge[0]) for i in graph.predecessors(edge[0])]
else:
neighbors = [(edge[1], i) for i in graph.successors(edge[1])]
if len(neighbors) > 0:
for neighbor in neighbors:
if window:
test_verts = vertices[vertices['edge'] == neighbor].tail(window)
else:
test_verts = vertices[vertices['edge'] == neighbor]
if statistic == 'min':
test_val = test_verts[column].min()
if val:
if test_val < val:
result = neighbor
val = test_val
else:
result = neighbor
val = test_val
return result
|
df81a5480a673efbf66bda7951b634f99996ffcf
| 23,867 |
def show_comparison(x_coordinates: np.ndarray, analytic_expression: callable, numeric_solution: [dict, np.ndarray],
numeric_label: str = "Numeric Solution", analytic_label: str = "Analytic Solution",
title: str = None, x_label: str = None, y_label: str = None, save_file_as: str = None):
"""
Method that shows the comparison between the analytic and numeric solutions.
:param x_coordinates: Array of input values for function.
:param numeric_solution: Array of values for the numeric solution.
:param analytic_expression: Function that describes the analytic solution.
:param numeric_label: Label for numeric solution on graph.
:param analytic_label: Label for analytic solution on graph.
:param title: Title of plot figure.
:param x_label: Label for the x axis.
:param y_label: Label for the y axis.
:param save_file_as: Filename used to save generated figure. If not defined figure is not saved.
:return: Displays the graphical comparison.
"""
check_method_call(x_coordinates)
check_method_call(analytic_expression)
check_method_call(numeric_solution)
analytic_solution = analytic_expression(x_coordinates)
default_cycler = cycler('color', ['b', 'g', 'k']) * cycler('linestyle', ['--', '-.', ':'])
plt.rc('axes', prop_cycle=default_cycler)
plt.plot(x_coordinates, analytic_solution, "r-", label=analytic_label)
if isinstance(numeric_solution, dict):
[plt.plot(x_coordinates, numeric_solution[key], label=("{:.4f}s".format(key) if isinstance(key, (float, int))
else key)) for key in sorted(numeric_solution)]
else:
plt.plot(x_coordinates, numeric_solution, "b--", label=numeric_label)
axes = plt.gca()
if x_label:
axes.set_xlabel(x_label)
if y_label:
axes.set_ylabel(y_label)
if title:
axes.set_title(title)
plt.grid()
plt.legend()
# Calculate errors
numeric_solution = np.array(numeric_solution if not isinstance(numeric_solution, dict) else
numeric_solution[max(numeric_solution.keys())])
error_array = np.nan_to_num(abs(numeric_solution - analytic_solution)/analytic_solution)
print("Mean Error: {0}\nStandard Error: {1}".format(np.mean(error_array), np.std(error_array)))
if save_file_as is not None and isinstance(save_file_as, str):
plt.savefig("{0}".format(save_file_as))
return plt.show()
|
10727c4db401469f88fa93db31a13e21037f8e63
| 23,868 |
def has_master(mc: MasterCoordinator) -> bool:
""" True if `mc` has a master. """
return bool(mc.sc and not mc.sc.master and mc.sc.master_url)
|
314fc4a2aa4deed7291d4676230bc9dafbb142d8
| 23,869 |
import time
def sz_margin_details(date='', retry_count=3, pause=0.001):
"""
    Fetch the Shenzhen market margin trading (financing and securities lending) detail list.
    Parameters
    --------
    date : string
        Date of the detail data, format: YYYY-MM-DD. Defaults to the empty string ''.
    retry_count : int, default 3
        Number of retries when network or similar problems occur.
    pause : int, default 0.001
        Seconds to pause between repeated requests, to avoid problems caused by
        requests spaced too closely together.
    Return
    ------
    DataFrame
    opDate: margin trading date
    stockCode: underlying security code
    securityAbbr: underlying security abbreviation
    rzmre: margin purchase amount (CNY)
    rzye: margin financing balance (CNY)
    rqmcl: securities lending sell volume
    rqyl: securities lending outstanding volume
    rqye: securities lending balance (CNY)
    rzrqye: total margin trading balance (CNY)
"""
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.MAR_SZ_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
ct.PAGES['szsefc'], date))
lines = urlopen(request, timeout = 10).read()
if len(lines) <= 200:
return pd.DataFrame()
df = pd.read_html(lines, skiprows=[0])[0]
df.columns = rv.MAR_SZ_MX_COLS
df['stockCode'] = df['stockCode'].map(lambda x:str(x).zfill(6))
df['opDate'] = date
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
|
115ef7ec68a08de065f086b53835c7bd05a34ff8
| 23,870 |
def delay_slot_insn(*args):
"""
delay_slot_insn(ea, bexec, fexec) -> bool
Helper function to get the delay slot instruction.
@param ea (C++: ea_t *)
@param bexec (C++: bool *)
@param fexec (C++: bool *)
"""
return _ida_idp.delay_slot_insn(*args)
|
6b0316845c4cefa2a33a9e9f5e0c24c1f2920a52
| 23,871 |
import numpy
from scipy.spatial.distance import cdist
def pairwise_radial_basis(K: numpy.ndarray, B: numpy.ndarray) -> numpy.ndarray:
"""Compute the TPS radial basis function phi(r) between every row-pair of K
and B where r is the Euclidean distance.
Arguments
---------
K : numpy.array
n by d vector containing n d-dimensional points.
B : numpy.array
m by d vector containing m d-dimensional points.
Return
------
P : numpy.array
n by m matrix where.
P(i, j) = phi( norm( K(i,:) - B(j,:) ) ),
where phi(r) = r^2*log(r), if r >= 1
r*log(r^r), if r < 1
"""
# r_mat(i, j) is the Euclidean distance between K(i, :) and B(j, :).
r_mat = cdist(K, B)
pwise_cond_ind1 = r_mat >= 1
pwise_cond_ind2 = r_mat < 1
r_mat_p1 = r_mat[pwise_cond_ind1]
r_mat_p2 = r_mat[pwise_cond_ind2]
    # P corresponds to the matrix K from [1].
P = numpy.empty(r_mat.shape)
P[pwise_cond_ind1] = (r_mat_p1**2) * numpy.log(r_mat_p1)
P[pwise_cond_ind2] = r_mat_p2 * numpy.log(numpy.power(r_mat_p2, r_mat_p2))
return P
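# Hedged shape/continuity check: phi is applied to every pairwise distance, and
# both branches of the piecewise definition meet at r = 1 where phi(1) = 0.
K_demo = numpy.array([[0.0, 0.0], [3.0, 4.0]])
B_demo = numpy.array([[0.0, 1.0]])
P_demo = pairwise_radial_basis(K_demo, B_demo)
print(P_demo.shape)  # (2, 1)
print(P_demo[0, 0])  # 0.0, since the first pair is exactly unit distance apart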
|
5ce4ff200bf953d80b7aff30b84b4ae0a62a0445
| 23,873 |
def low_index_subgroups(G, N, Y=[]):
"""
Implements the Low Index Subgroups algorithm, i.e find all subgroups of
``G`` upto a given index ``N``. This implements the method described in
[Sim94]. This procedure involves a backtrack search over incomplete Coset
Tables, rather than over forced coincidences.
Parameters
==========
G: An FpGroup < X|R >
N: positive integer, representing the maximum index value for subgroups
Y: (an optional argument) specifying a list of subgroup generators, such
that each of the resulting subgroup contains the subgroup generated by Y.
Examples
========
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
>>> L = low_index_subgroups(f, 4)
>>> for coset_table in L:
... print(coset_table.table)
[[0, 0, 0, 0]]
[[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]]
[[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]]
[[1, 1, 0, 0], [0, 0, 1, 1]]
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
Section 5.4
.. [2] Marston Conder and Peter Dobcsanyi
"Applications and Adaptions of the Low Index Subgroups Procedure"
"""
C = CosetTable(G, [])
R = G.relators
# length chosen for the length of the short relators
len_short_rel = 5
# elements of R2 only checked at the last step for complete
# coset tables
R2 = {rel for rel in R if len(rel) > len_short_rel}
# elements of R1 are used in inner parts of the process to prune
# branches of the search tree,
R1 = {rel.identity_cyclic_reduction() for rel in set(R) - R2}
R1_c_list = C.conjugates(R1)
S = []
descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)
return S
|
03dc48ada37302ca6d4bc5054660aae60bca2ca5
| 23,874 |
import numpy as np
def ifft_complex(fft_sig_complex) -> np.ndarray:
"""
Compute the one-dimensional inverse discrete Fourier Transform.
:param fft_sig_complex: input array, can be complex.
:return: the truncated or zero-padded input, transformed along the axis
"""
ifft_sig = np.fft.ifft(fft_sig_complex)
fft_points = len(ifft_sig)
ifft_sig *= fft_points
return ifft_sig
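# Hedged round-trip sketch: note the result is rescaled by the number of FFT
# points, so fft followed by ifft_complex returns N times the original signal.
sig = np.array([1.0, 2.0, 3.0, 4.0])
recovered = ifft_complex(np.fft.fft(sig))
print(np.allclose(recovered, len(sig) * sig))  # True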
|
101189d4116b0d968ee015534ab8b8fc0020a769
| 23,875 |
def merge_eopatches(*eopatches, features=..., time_dependent_op=None, timeless_op=None):
""" Merge features of given EOPatches into a new EOPatch
:param eopatches: Any number of EOPatches to be merged together
:type eopatches: EOPatch
:param features: A collection of features to be merged together. By default all features will be merged.
:type features: object
:param time_dependent_op: An operation to be used to join data for any time-dependent raster feature. Before
joining time slices of all arrays will be sorted. Supported options are:
- None (default): If time slices with matching timestamps have the same values, take one. Raise an error
otherwise.
- 'concatenate': Keep all time slices, even the ones with matching timestamps
- 'min': Join time slices with matching timestamps by taking minimum values. Ignore NaN values.
- 'max': Join time slices with matching timestamps by taking maximum values. Ignore NaN values.
- 'mean': Join time slices with matching timestamps by taking mean values. Ignore NaN values.
- 'median': Join time slices with matching timestamps by taking median values. Ignore NaN values.
:type time_dependent_op: str or Callable or None
:param timeless_op: An operation to be used to join data for any timeless raster feature. Supported options
are:
- None (default): If arrays are the same, take one. Raise an error otherwise.
- 'concatenate': Join arrays over the last (i.e. bands) dimension
- 'min': Join arrays by taking minimum values. Ignore NaN values.
- 'max': Join arrays by taking maximum values. Ignore NaN values.
- 'mean': Join arrays by taking mean values. Ignore NaN values.
- 'median': Join arrays by taking median values. Ignore NaN values.
:type timeless_op: str or Callable or None
:return: A dictionary with EOPatch features and values
:rtype: Dict[(FeatureType, str), object]
"""
reduce_timestamps = time_dependent_op != 'concatenate'
time_dependent_op = _parse_operation(time_dependent_op, is_timeless=False)
timeless_op = _parse_operation(timeless_op, is_timeless=True)
all_features = {feature for eopatch in eopatches for feature in FeatureParser(features)(eopatch)}
eopatch_content = {}
timestamps, sort_mask, split_mask = _merge_timestamps(eopatches, reduce_timestamps)
eopatch_content[FeatureType.TIMESTAMP] = timestamps
for feature in all_features:
feature_type, feature_name = feature
if feature_type.is_raster():
if feature_type.is_time_dependent():
eopatch_content[feature] = _merge_time_dependent_raster_feature(
eopatches, feature, time_dependent_op, sort_mask, split_mask
)
else:
eopatch_content[feature] = _merge_timeless_raster_feature(eopatches, feature,
timeless_op)
if feature_type.is_vector():
eopatch_content[feature] = _merge_vector_feature(eopatches, feature)
if feature_type is FeatureType.META_INFO:
eopatch_content[feature] = _select_meta_info_feature(eopatches, feature_name)
if feature_type is FeatureType.BBOX:
eopatch_content[feature] = _get_common_bbox(eopatches)
return eopatch_content
|
6328528c6b6fc3013db6aa17f0153354230efd4b
| 23,876 |
import math
def dispos(dra0, decd0, dra, decd):
"""
Source/credit: Skycat
dispos computes distance and position angle solving a spherical
triangle (no approximations)
INPUT :coords in decimal degrees
OUTPUT :dist in arcmin, returns phi in degrees (East of North)
AUTHOR :a.p.martinez
Parameters:
dra0: center RA decd0: center DEC dra: point RA decd: point DEC
Returns:
distance in arcmin
"""
radian = 180.0/math.pi
# coord transformed in radians
alf = dra / radian
alf0 = dra0 / radian
del_ = decd / radian
del0 = decd0 / radian
sd0 = math.sin(del0)
sd = math.sin(del_)
cd0 = math.cos(del0)
cd = math.cos(del_)
cosda = math.cos(alf - alf0)
cosd = sd0*sd + cd0*cd*cosda
dist = math.acos(cosd)
phi = 0.0
if dist > 0.0000004:
sind = math.sin(dist)
cospa = (sd*cd0 - cd*sd0*cosda)/sind
#if cospa > 1.0:
# cospa=1.0
if math.fabs(cospa) > 1.0:
# 2005-06-02: fix from [email protected]
cospa = cospa/math.fabs(cospa)
sinpa = cd*math.sin(alf-alf0)/sind
phi = math.acos(cospa)*radian
if sinpa < 0.0:
phi = 360.0-phi
dist *= radian
dist *= 60.0
if decd0 == 90.0:
phi = 180.0
if decd0 == -90.0:
phi = 0.0
return (phi, dist)
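# Hedged example: a point one degree due north of the centre should come back
# with position angle ~0 deg (East of North) and a separation of 60 arcmin.
phi_demo, dist_demo = dispos(10.0, 20.0, 10.0, 21.0)
print(round(phi_demo, 1), round(dist_demo, 1))  # 0.0 60.0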
|
5c1b7c79a82f59764fd43ba0d89a763955b09a04
| 23,878 |
import socket
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
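# Hedged usage sketch: grab an ephemeral TCP port for a test server; the host
# is passed explicitly so the module-level HOST constant is not required here.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    demo_port = bind_port(srv, host='127.0.0.1')
    srv.listen()
    print('listening on port', demo_port)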
|
a326581bea0f0873292028a5e39a710ea89fde4b
| 23,879 |
from typing import Union
def node_to_html(node: Union[str, NodeElement, list]) -> str:
"""
Convert Nodes to HTML
:param node:
:return:
"""
if isinstance(node, str): # Text
return escape(node)
elif isinstance(node, list): # List of nodes
result = ''
for child_node in node:
result += node_to_html(child_node)
return result
elif not isinstance(node, NodeElement):
raise TypeError(f"Node must be instance of str or NodeElement, not {type(node)}")
# NodeElement
# Open
result = "<" + node.tag
if node.attrs:
result += ' ' + ' '.join(f"{k}=\"{v}\"" for k, v in node.attrs.items())
if node.tag in VOID_ELEMENTS: # Close void element
result += '/>'
else:
result += '>'
for child_node in node.children: # Container body
result += node_to_html(child_node)
result += '</' + node.tag + '>' # Close tag
return result
|
0366dffc181f27ac10cbae8d9eae65b6822371c6
| 23,880 |