content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---|
def getChanprofIndex(chanprof, profile, chanList):
""" List of indices into the RTTOV chanprof(:) array corresponding to the chanlist.
NB This assumes you've checked the chanlist against chanprof already.
"""
ilo = sum(map(len, chanprof[:profile-1]))
ichanprof = []
for c in chanList:
ichanprof.append(ilo + chanprof[profile-1].index(c))
return ichanprof
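# Hedged usage sketch -- the chanprof values below are made up, not real RTTOV output:
# chanprof = [[1, 2, 3], [4, 5, 6]]   (profile 1 -> channels 1-3, profile 2 -> channels 4-6)
# getChanprofIndex(chanprof, 2, [5, 6]) -> [4, 5], i.e. the 0-based positions in the flattened chanprof(:) array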
|
e61e210e8b05fdfbf3a769f4b5b388d765d436b9
| 33,168 |
def _get_blob_size_string(blob_key):
"""Return blob size string."""
blob_size = blobs.get_blob_size(blob_key)
if blob_size is None:
return None
return utils.get_size_string(blob_size)
|
5f223101c71d641540aef97bda1b59f3bc7dfa7c
| 33,169 |
def masked_softmax_full(input_layer, n_nodes, batch_size):
"""
A Lambda layer to compute a lower-triangular version of the full adjacency.
Each row must sum up to one. We apply a lower triangular mask of ones
and then add an upper triangular mask of a large negative number.
After that we return the full adjacency matrix.
Parameters
----------
input_layer: keras layer object
(n x 1, n) matrix
Returns
-------
output_layer: keras layer object
(n x 1, n) matrix
"""
mask_layer = masked_softmax(input_layer, n_nodes, batch_size)
mask_layer = \
K.concatenate([K.zeros(shape=[batch_size, 1, n_nodes]), mask_layer],
axis=1)
result, updates = \
K.theano.scan(fn=lambda n: full_matrix(mask_layer[n, :, :], n_nodes),
sequences=K.arange(batch_size))
return result[:, 1:, :]
|
d48d48a90cb8ac80614ea54ee3a3f2e56def3a69
| 33,171 |
import collections
import socket
def match_backends_and_tasks(backends, tasks):
"""Returns tuples of matching (backend, task) pairs, as matched by IP and port. Each backend will be listed exactly
once, and each task will be listed once per port. If a backend does not match with a task, (backend, None) will
be included. If a task's port does not match with any backends, (None, task) will be included.
:param backends: An iterable of haproxy backend dictionaries, e.g. the list returned by
smartstack_tools.get_multiple_backends.
:param tasks: An iterable of MarathonTask objects.
"""
backends_by_ip_port = collections.defaultdict(list) # { (ip, port) : [backend1, backend2], ... }
backend_task_pairs = []
for backend in backends:
ip, port, _ = ip_port_hostname_from_svname(backend['svname'])
backends_by_ip_port[ip, port].append(backend)
for task in tasks:
ip = socket.gethostbyname(task.host)
for port in task.ports:
for backend in backends_by_ip_port.pop((ip, port), [None]):
backend_task_pairs.append((backend, task))
# we've been popping in the above loop, so anything left didn't match a marathon task.
for backends in backends_by_ip_port.values():
for backend in backends:
backend_task_pairs.append((backend, None))
return backend_task_pairs
|
9fab7e8b8f1a3c7a3cdbfb271fc5a4aac4807b03
| 33,172 |
def _drop_path(x, keep_prob):
"""
Drops out a whole example's hidden state with probability 1 - keep_prob;
surviving examples are rescaled by 1 / keep_prob.
"""
batch_size = tf.shape(x)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.floor(random_tensor)
x = tf.div(x, keep_prob) * binary_tensor
return x
|
a91d308a91fbf328c472c2758dd080bab1b3ee4c
| 33,173 |
import yaml
def read_pipeline_definition(file_path):
"""Function reads the yaml pipeline definitions.
Function reads the yaml pipeline definitions. We also remove the variables
key as that was only used for yaml placeholders.
Args:
file_path (str): Path to the pipeline definition.
Returns:
dict: parsed yaml definition as dictionary.
Raises:
ETLInputError: If `file_path` extension is not yaml
"""
extension = file_path.split('.').pop()
if extension != 'yaml':
raise ETLInputError('Pipeline definition should have a yaml extension')
with open(file_path) as f:
definition = yaml.load(f.read())
# remove the variables key from the pipeline definition
# http://stackoverflow.com/questions/4150782/using-yaml-with-variables
definition.pop('variables', None)
return definition
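# Hedged usage sketch -- the file name and YAML content below are illustrative only.
# Given "pipeline.yaml" containing:
#   variables:
#     bucket: &bucket my-bucket
#   steps:
#     - {name: load, source: *bucket}
# read_pipeline_definition("pipeline.yaml") -> {'steps': [{'name': 'load', 'source': 'my-bucket'}]}
# (anchors are resolved by yaml.load, then the 'variables' key is dropped).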
|
ea6ee7e8fcd14ffb30bca48b5f9505bc49657d0c
| 33,174 |
import io
def detect_text(path):
"""Detects text in the file."""
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('Texts:')
# Prints the complete text
try:
print(str(texts[0].description))
# Outputs text to output_string, the * acts as a multiplier to boost
# classification
output_string = str(texts[0].description)*SENSITIVITY
except IndexError:
print('Text not detected')
output_string = ""
return output_string
|
22993db37ca8d858a9c4e1fbf866da9deca67528
| 33,175 |
import bisect
def find_dataset_ind(windows_ds, win_ind):
"""Taken from torch.utils.data.dataset.ConcatDataset.
"""
return bisect.bisect_right(windows_ds.cumulative_sizes, win_ind)
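# Minimal sketch -- only the cumulative_sizes attribute of windows_ds matters here.
# With windows_ds.cumulative_sizes == [100, 250, 400]:
# find_dataset_ind(windows_ds, 0) -> 0, find_dataset_ind(windows_ds, 99) -> 0,
# find_dataset_ind(windows_ds, 100) -> 1, find_dataset_ind(windows_ds, 399) -> 2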
|
76abcbdf9718cc59f1d2b7ca8daacc062970b253
| 33,176 |
def from_moment(w):
"""Converts a moment representation w to a 3D rotation matrix."""
length = vectorops.norm(w)
if length < 1e-7: return identity()
return rotation(vectorops.mul(w,1.0/length),length)
|
3a5adf11665cd32dbebde158fbee5939e5654f18
| 33,177 |
def add_menu(data):
"""
Insert a new menu record.
:param data: field values for the new menu row
:return: the inserted row's id
"""
i = SysMenu.insert(data).execute()
return i
|
07173c3648bbb3ed957f549420e80bcb1c539f48
| 33,178 |
import numpy as np
from typing import Callable
def compute_distance_matrix(m1: np.ndarray, m2: np.ndarray,
dist_func: Callable[[np.ndarray, np.ndarray], np.ndarray],
row_wise: bool = False) -> np.ndarray:
"""
Function for computing the pair-wise distance matrix between two arrays of
vectors. Both matrices must have the same number of columns.
"""
if m1.ndim == 1:
m1 = m1[np.newaxis]
if m2.ndim == 1:
m2 = m2[np.newaxis]
k = np.ndarray((m1.shape[0], m2.shape[0]), dtype=float)
if row_wise:
# row wise
for i in range(m1.shape[0]):
k[i, :] = dist_func(m1[i], m2)
else:
for i in range(m1.shape[0]):
for j in range(m2.shape[0]):
k[i, j] = dist_func(m1[i], m2[j])
return k
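# Hedged usage sketch with a hypothetical Euclidean distance function:
# euclid = lambda a, b: np.linalg.norm(a - b, axis=-1)
# compute_distance_matrix(np.array([[0., 0.], [1., 0.]]), np.array([[0., 3.]]), euclid)
# -> [[3.0], [3.16227766]]  (a 2x1 distance matrix; row_wise=True gives the same result with fewer Python loops)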
|
d044daaca81e9dc0ec186195493f1182a8209a1a
| 33,179 |
from math import ceil
from shutil import get_terminal_size
from typing import List
from typing import Iterator
from typing import Any
def term_table(
strings: List[str], row_wise: bool = False, filler: str = "~"
) -> Iterator[Any]:
"""
Lay the given strings out as a table that fits the current terminal width.
:param strings: strings to arrange into columns
:param row_wise: if True, fill the table row by row instead of column by column
:param filler: string used to pad the table out to a full grid
:return: an iterator of formatted lines
"""
max_str_len = max(len(s) for s in strings) + 5
terminal_cols = get_terminal_size((80, 20)).columns
n_cols = terminal_cols // max_str_len
n_rows = int(ceil(len(strings) / n_cols))
spaces = " " * ((terminal_cols - (max_str_len * n_cols)) // n_cols)
size_string = "{:<" + str(max_str_len) + "}" + spaces
fmtstring = size_string * (n_cols - 1) + "{:<}"
strings.extend(filler for _ in range(n_rows * n_cols - len(strings)))
if row_wise:
line_iter = zip(*(strings[i::n_cols] for i in range(n_cols)))
else:
line_iter = (strings[i::n_rows] for i in range(n_rows))
return (fmtstring.format(*row) for row in line_iter)
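# Hedged usage sketch (the exact layout depends on the terminal width at call time):
# print("\n".join(term_table(["alpha", "beta", "gamma", "delta", "epsilon"])))
# prints the labels in evenly spaced columns, padding any leftover cells with "~".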
|
47fe2d7fc63490b99f03acb57b08eabbe9e1b20c
| 33,180 |
def create_pipelines_lingspam():
"""Reproduces the pipelines evaluated in the LingSpam paper.
I. Androutsopoulos, J. Koutsias, K.V. Chandrinos, George Paliouras,
and C.D. Spyropoulos, "An Evaluation of Naive Bayesian Anti-Spam
Filtering". In Potamias, G., Moustakis, V. and van Someren, M. (Eds.),
Proceedings of the Workshop on Machine Learning in the New Information
Age, 11th European Conference on Machine Learning (ECML 2000),
Barcelona, Spain, pp. 9-17, 2000.
Differences: lemmatization is used instead of stemming.
"""
stop = ('stop', StopWordRemovalTransformer())
lemma = ('lemma', LemmatizeTransformer())
binz = ('binarizer', CountVectorizer())
we = ('document embedding', DocEmbeddingVectorizer())
sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))
clf = ('cls', BernoulliNB()) # Binary features in the original paper.
return Pipeline([binz, sel, clf]), \
Pipeline([stop, binz, sel, clf]), \
Pipeline([lemma, binz, sel, clf]), \
Pipeline([stop, lemma, binz, sel, clf]), \
Pipeline([stop, lemma, we, sel, clf])
|
3d691c869e1b92f16d892e7d87cc59bad055d2a0
| 33,181 |
def band_dos_plain_spin_polarized(
band_folder,
dos_folder,
output='band_dos_plain_sp.png',
up_color='black',
down_color='red',
linewidth=1.25,
up_linestyle='-',
down_linestyle=':',
figsize=(6, 3),
width_ratios=[7, 3],
erange=[-6, 6],
kpath=None,
custom_kpath=None,
n=None,
unfold=False,
M=None,
high_symm_points=None,
fontsize=12,
save=True,
shift_efermi=0,
interpolate=False,
new_n=200,
soc_axis=None,
fill=True,
alpha=0.3,
sigma=0.05,
):
"""
This function plots a plain spin-polarized band structure and density of states next to each other.
Parameters:
band_folder (str): This is the folder that contains the VASP files for the band structure calculation
dos_folder (str): This is the folder that contains the VASP files for the density of states calculation
output (str): File name of the resulting plot.
up_color (str): Color of the spin up band structure lines
down_color (str): Color of the spin down band structure lines
linewidth (float): Line width of the band structure lines
up_linestyle (str): Line style of the spin up bands
down_linestyle (str): Line style of the spin down bands
figsize (list / tuple): Desired size of the image in inches (width, height)
width_ratios (list / tuple): Width ratio of the band plot and the dos plot.
erange (list / tuple): Range of energy to show in the plot [low, high]
kpath (list[list]): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for unfolded calculations this
information is a required input for proper labeling of the figure
for unfolded calculations. This information is extracted from the KPOINTS
files for non-unfolded calculations. (G is automatically converted to \\Gamma)
(e.g. For the path X-G-X, kpath=[['X', 'G'], ['G', 'X']])
custom_kpath (list): This gives the option to only plot specific segments of a given band structure
calculation. For example if the kpath was G-X-W-L then there are three segements to choose from:
G-X, X-W, and W-L. In this case the default kpath could be plotted by defining custom_kpath=[1,2,3],
where 1 -> G-X, 2 -> X-W, and 3 -> W-L. If only G-X and X-W were desired then custom_kpath=[1,2].
If one of the segements should be flipped it can be done by making its value negative
(e.g. -1 -> X-G, -2 -> W-X, -3 -> L-W)
n (int): Number of points between each high symmetry points.
This is also only required for unfolded calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
sigma (float): Standard deviation for gaussian filter
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
fig, ax = plt.subplots(
nrows=1,
ncols=2,
sharey=True,
figsize=figsize,
dpi=400,
gridspec_kw={'width_ratios': width_ratios}
)
ax1, ax2 = _figure_setup_band_dos(
ax=ax,
fontsize=fontsize,
ylim=[erange[0], erange[1]]
)
band_up = Band(
folder=band_folder,
spin='up',
unfold=unfold,
high_symm_points=high_symm_points,
interpolate=interpolate,
new_n=new_n,
soc_axis=soc_axis,
kpath=kpath,
custom_kpath=custom_kpath,
n=n,
M=M,
shift_efermi=shift_efermi,
)
band_down = Band(
folder=band_folder,
spin='down',
unfold=unfold,
high_symm_points=high_symm_points,
interpolate=interpolate,
new_n=new_n,
soc_axis=soc_axis,
kpath=kpath,
custom_kpath=custom_kpath,
n=n,
M=M,
shift_efermi=shift_efermi,
)
dos_up = Dos(shift_efermi=shift_efermi, folder=dos_folder, spin='up')
dos_down = Dos(shift_efermi=shift_efermi, folder=dos_folder, spin='down')
band_up.plot_plain(
sp_scale_factor=0,
ax=ax1,
color=up_color,
linewidth=linewidth,
linestyle=up_linestyle,
erange=erange,
)
band_down.plot_plain(
sp_scale_factor=0,
ax=ax1,
color=down_color,
linewidth=linewidth,
linestyle=down_linestyle,
erange=erange,
)
dos_up.plot_plain(
ax=ax2,
linewidth=linewidth,
fill=fill,
alpha=alpha,
sigma=sigma,
energyaxis='y',
color=up_color,
erange=erange,
)
dos_down.plot_plain(
ax=ax2,
linewidth=linewidth,
fill=fill,
alpha=alpha,
sigma=sigma,
energyaxis='y',
color=down_color,
erange=erange,
)
fig.canvas.draw()
nbins = len(ax2.get_xticklabels())
ax2.xaxis.set_major_locator(MaxNLocator(nbins=nbins - 1, prune='lower'))
plt.tight_layout(pad=0.4)
plt.subplots_adjust(wspace=0)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2
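# Hedged usage sketch -- the folder names are hypothetical and must point at finished VASP runs:
# band_dos_plain_spin_polarized(band_folder="band_calc", dos_folder="dos_calc",
# output="band_dos_plain_sp.png", erange=[-4, 4])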
|
8a00300828a0a9672edf025fcf4ca2a7c3264c96
| 33,182 |
def _unsigned16(data, littleEndian=False):
"""return a 16-bit unsigned integer with selectable Endian"""
assert len(data) >= 2
if littleEndian:
b0 = data[1]
b1 = data[0]
else:
b0 = data[0]
b1 = data[1]
val = (b0 << 8) + b1
return val
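# Worked example: _unsigned16(b"\x12\x34") == 0x1234, while
# _unsigned16(b"\x12\x34", littleEndian=True) == 0x3412.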
|
22feb074aca7f4ab7d489eacb573c3653cad9272
| 33,183 |
def calculate_term_frequencies(tokens):
"""Given a series of `tokens`, produces a sorted list of tuples in the
format of (term frequency, token).
"""
frequency_dict = {}
for token in tokens:
frequency_dict.setdefault(token, 0)
frequency_dict[token] += 1
tf = []
for token, count in frequency_dict.items():
tf.append( (count, token) )
return sorted(tf, reverse=True)
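# Worked example:
# calculate_term_frequencies(["a", "b", "a", "c", "a", "b"]) -> [(3, 'a'), (2, 'b'), (1, 'c')]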
|
b764175cd59fe25c4a87576faee2a76273097c5e
| 33,184 |
def max(*l):
"""
Element-wise max of each of the input tensors (with Numpy-style broadcasting support).
Args:
*l (a list of Tensor): Tensors to take the element-wise max of.
Returns:
Tensor, the output
"""
return Max()(*l)[0]
|
9467af70178c17d8bfa6404ca4969f6aae22ef2f
| 33,185 |
def _edr_peak_trough_mean(ecg: pd.Series, peaks: np.array, troughs: np.array) -> np.array:
"""Estimate respiration signal from ECG based on `peak-trough-mean` method.
The `peak-trough-mean` method is based on computing the mean amplitude between R peaks (`peaks`) and
minima before R peaks (`troughs`).
Parameters
----------
ecg : :class:`~pandas.Series`
pandas series with ecg signal
peaks : :class:`~numpy.array`
array with peak indices
troughs : :class:`~numpy.array`
array with trough indices
Returns
-------
:class:`~numpy.array`
estimated raw respiration signal
"""
peak_vals = np.array(ecg.iloc[peaks])
trough_vals = np.array(ecg.iloc[troughs])
return np.mean([peak_vals, trough_vals], axis=0)
|
9f2a5fdbe9d27d0461133757157ef034fe46e2af
| 33,186 |
import joblib
def feature_stacking(n_splits=CV, random_state=None, use_proba=False, verbose=False, drop_words=0.):
"""
Args:
n_splits: n_splits for KFold
random_state: random_state for KFold
use_proba: True to predict probabilities of labels instead of labels
verbose: True to print more info
drop_words: drop_words for run_parallel
Returns:
X, y, X_test
"""
clf = LinearSVC()
train_url = from_project_root("data/train_set.csv")
test_url = from_project_root("data/test_set.csv")
# test_url = None
X, y, X_test = generate_vectors(train_url, test_url, sublinear_tf=False) # for X.shape
params_list = load_params()
parallel = joblib.Parallel(n_jobs=N_JOBS, verbose=True)
rets = parallel(joblib.delayed(run_parallel)(
ind, train_url, test_url, params, clone(clf), n_splits, random_state, use_proba, verbose, drop_words
) for ind, params in enumerate(params_list))
rets = sorted(rets, key=lambda x: x[0])
X_stack_train = np.empty((X.shape[0], 0), float)
X_stack_test = np.empty((X_test.shape[0], 0), float)
for ind, y_pred, y_pred_test in rets:
X_stack_train = np.append(X_stack_train, y_pred, axis=1)
X_stack_test = np.append(X_stack_test, y_pred_test, axis=1)
return X_stack_train, y, X_stack_test
|
7af6d07dabd39ff27dcf66dc0d9d41cc30eefb70
| 33,187 |
import traceback
import json
def data(request):
"""
[Method summary]
List view for the action history screen
"""
logger.logic_log('LOSI00001', 'none', request=request)
msg = ''
lang = request.user.get_lang_mode()
ita_flg = False
mail_flg = False
servicenow_flg = False
filter_info = {
'tblname' : None,
'rulename' : None,
}
interval = request.GET.get('reload', None)
if 'user_config' not in request.session:
request.session['user_config'] = {}
if 'action_history_interval' not in request.session['user_config']:
request.session['user_config']['action_history_interval'] = 0
if interval is not None:
request.session['user_config']['action_history_interval'] = interval
try:
# Get the per-rule access permissions for the action screen
permission_info = request.user_config.get_rule_auth_type(MENU_ID)
# Get the accessible rule type IDs
rule_ids_view = permission_info[defs.VIEW_ONLY]
rule_ids_admin = permission_info[defs.ALLOWED_MENTENANCE]
rule_ids_all = rule_ids_view + rule_ids_admin
# Action type management
action_type_list = ActionType.objects.all()
# Action status management
action_status_dict = defs.ACTION_STATUS
# Get the driver types
driver_type_list = DriverType.objects.all()
# Check which drivers are installed
for act_type in action_type_list:
if act_type.driver_type_id == 1 and act_type.disuse_flag == '0':
ita_flg = True
elif act_type.driver_type_id == 2 and act_type.disuse_flag == '0':
mail_flg = True
elif act_type.driver_type_id == 3 and act_type.disuse_flag == '0':
servicenow_flg = True
# Fetch the action history
action_history = ActionHistory.objects.filter(rule_type_id__in=rule_ids_all).order_by('-pk') if len(rule_ids_all) > 0 else []
action_history_list = []
# Prepare the data for display
for act in action_history:
# Check the rule type's disuse flag
act.disuse_flag = RuleType.objects.get(rule_type_id=act.rule_type_id).disuse_flag
# Set the strings used for the icon display
status = act.status
if act.retry_status is not None:
status = act.retry_status
if status in defs.ACTION_HISTORY_STATUS.ICON_INFO:
# If an entry awaiting approval has been deleted, treat it as processed
if act.disuse_flag != '0' and status == 6:
act.tmp = defs.ACTION_HISTORY_STATUS.ICON_INFO[8]
else:
act.tmp = defs.ACTION_HISTORY_STATUS.ICON_INFO[status]
else:
act.tmp = {'status':'attention','name':'owf-attention','description':'MOSJA13063'}
act.class_info = {'status':'','name':'','description':''}
act.class_info['status'] = act.tmp['status']
act.class_info['name'] = act.tmp['name']
act.class_info['description'] = get_message(act.tmp['description'], lang, showMsgId=False)
if act.action_type_id != defs.NO_ACTION:
for type in action_type_list:
if type.action_type_id == act.action_type_id:
for driver in driver_type_list:
if type.driver_type_id == driver.driver_type_id:
driver_name = driver.name + '(ver' + str(driver.driver_major_version) + ')'
else:
driver_name = get_message('MOSJA11159', lang, showMsgId=False)
table_info = {
'pk' : act.pk,
'response_id' : act.response_id,
'execution_order' : act.execution_order,
'disuse_flag' : act.disuse_flag,
'can_update' : rule_ids_admin,
'rule_type_id' : act.rule_type_id,
'rule_type_name' : act.rule_type_name,
'rule_name' : act.rule_name,
'incident_happened' : act.incident_happened,
'handling_summary' : act.handling_summary,
'driver_name' : driver_name,
'status' : act.status,
'retry_status' : act.retry_status,
'class_info' : act.class_info,
'action_start_time' : act.action_start_time,
'last_update_timestamp' : act.last_update_timestamp,
'last_act_user' : act.last_update_user
}
action_history_list.append(table_info)
except Exception as e:
msg = get_message('MOSJA13000', request.user.get_lang_mode())
logger.logic_log('LOSM05000', 'traceback: %s' % traceback.format_exc(), request=request)
logger.logic_log('LOSI00002', 'none', request=request)
response_json = json.dumps(action_history_list, default=json_serial)
return HttpResponse(response_json, content_type="application/json")
|
e5292200699d169693d55e3032a47260258db5bc
| 33,188 |
def get_default():
"""Get the configuration from the source code"""
return {name: dict(block()._asdict()) for name, _, block in triples}
|
00279671d46b95dd85c307d2d690f5215d9e0a99
| 33,189 |
def npulses(image_number,passno=0):
"""How many X-ray bursts to send to the sample as function of image
number. image_number is 1-based, passno is 0-based.
"""
# When using sample translation the exposure may be broken up
# into several passes.
if passno is not None: npulses = npulses_of_pass(image_number,passno)
else:
# Methods-based data collection
mode = SAXS_WAXS_methods.Ensemble_mode.value
burst_length = Ensemble_SAXS.burst_length_of_mode(mode)
passes = SAXS_WAXS_methods.passes_per_image.value
npulses = burst_length*passes
return npulses
|
485c2b27979a0b25ca4449b538f12554d2f12938
| 33,190 |
import torch
from typing import Optional
def topk__dynamic(ctx,
input: torch.Tensor,
k: int,
dim: Optional[int] = None,
largest: bool = True,
sorted: bool = True):
"""Rewrite `topk` for default backend.
Cast k to a tensor and make sure k is no larger than input.shape[dim].
"""
if dim is None:
dim = int(input.ndim - 1)
size = input.shape[dim]
if not isinstance(k, torch.Tensor):
k = torch.tensor(k, device=input.device, dtype=torch.long)
# Always keep topk op for dynamic input
if isinstance(size, torch.Tensor):
size = size.to(input.device)
k = torch.where(k < size, k, size)
return ctx.origin_func(input, k, dim=dim, largest=largest, sorted=sorted)
|
f01ac53e2b7b5a1cef4ff55b0066bff7e9846f7f
| 33,191 |
def GetLocalNodeId() -> int:
"""Returns the current local node id. If none has been set, a default is set and
used."""
global _local_node_id
if _local_node_id is None:
SetLocalNodeId(DEFAULT_LOCAL_NODE_ID)
return _local_node_id
|
500c2795eade3e0854f23fbfb99e82796b98c7ef
| 33,192 |
def get_dlons_from_case(case: dict):
"""pull list of latitudes from test case"""
dlons = [geo[1] for geo in case["destinations"]]
return dlons
|
666ab789761e99749b4852a51f5d38c35c66bd2a
| 33,193 |
def account_info(info):
"""Extract user information from IdP response"""
return dict(
user=dict(
email=info['User.email'][0],
profile=dict(
username=info['User.FirstName'][0],
full_name=info['User.FirstName'][0])),
external_id=info['User.email'][0],
external_method='onelogin',
active=True)
|
1e3141e4ca84b935af67078d36e035c6c94bcefc
| 33,194 |
from tensorflow.keras.layers import Concatenate, UpSampling2D  # Keras layers; typing.Concatenate was a mis-detected import
def MangoYOLO(inputs, num_anchors, num_classes, **kwargs):
"""Create Tiny YOLO_v3 model CNN body in keras."""
x1 = compose(
DarknetConv2D_BN_Leaky(16, (3,3), **kwargs),
DarknetConv2D_BN_Leaky(16, (3, 3), strides=2, **kwargs),
DarknetConv2D_BN_Leaky(32, (3,3), **kwargs),
DarknetConv2D_BN_Leaky(32, (3, 3), strides=2, **kwargs),
DarknetConv2D_BN_Leaky(64, (3,3), **kwargs),
DarknetConv2D_BN_Leaky(64, (3, 3), strides=2, **kwargs),
DarknetConv2D_BN_Leaky(128, (3,3), **kwargs))(inputs)
x2 = compose(
DarknetConv2D_BN_Leaky(128, (3, 3), strides=2, **kwargs),
DarknetConv2D_BN_Leaky(256, (3, 3), **kwargs),
)(x1)
x3 = compose(
DarknetConv2D_BN_Leaky(256, (3, 3), strides=2, **kwargs),
DarknetConv2D_BN_Leaky(512, (3, 3), **kwargs)
)(x2)
y1 = compose(
DarknetConv2D_BN_Leaky(1024, (3, 3), **kwargs),
DarknetConv2D_BN_Leaky(512, (3, 3), **kwargs),
DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name="y1")
)(x3)
x4 = compose(
DarknetConv2D_BN_Leaky(256, (1,1), **kwargs),
UpSampling2D(2)
)(x3)
y2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(256, (1,1), **kwargs),
DarknetConv2D_BN_Leaky(512, (3, 3), **kwargs),
DarknetConv2D_BN_Leaky(256, (1, 1), **kwargs),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name="y2")
)([x2, x4])
x5 = compose(
DarknetConv2D_BN_Leaky(128, (1, 1), **kwargs),
UpSampling2D(2)
)(y2)
y3 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(128, (1, 1), **kwargs),
DarknetConv2D_BN_Leaky(256, (3, 3), **kwargs),
DarknetConv2D_BN_Leaky(128, (1, 1), **kwargs),
DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name="y3")
)([x5, x1])
return Model(inputs, [y1, y2, y3], name="MangoYOLO")
|
b9d1eba1407e5cd5e0037bd968e0d897153e31c0
| 33,195 |
def zone_max_matching(plan_a, plan_b):
"""
Determines the optimal bipartite matching of
districts in plan_a to districts in plan_b
to maximize the total population overlap.
Both plans should have districts indexed from 1 to k,
where k is some positive integer.
Based on the concept of "reference zones"
as defined in Pereira et al. (2009).
Returns a list ref_zones representing
a bijection from zones in plan_a to zones in plan_b.
Note ref_zones is indexed 0 to k, whereas
the districts are indexed 1 to k, so
ref_zones[0] is unused.
"""
assert(len(plan_a.parts) == len(plan_b.parts))
k = len(plan_a.parts)
overlap_counts = np.zeros((len(plan_a.parts), len(plan_b.parts)))
for zone_a in plan_a.parts:
for zone_b in plan_b.parts:
intersection = plan_a.parts[zone_a].intersection(plan_b.parts[zone_b])
df = plan_a.graph.data
df = df.loc[df.index.isin(intersection)]
overlap_counts[zone_a - 1, zone_b - 1] = df['population'].sum()
max_matching = max_matching_helper(overlap_counts)
ref_zones = [0] + max_matching
return ref_zones
|
a710b27540ddff5c352374c62e074a2eed9ce39e
| 33,196 |
def dyn_sim_feedback_discrete_time(A, Bu, Bd, x0, u, d, t_series, K):
"""
Simulate discrete-time ODE
Args:
A: discrete-time A
Bu: discrete-time B for control
Bd: discrete-time B for disturbance
x0: Initial condition in numpy array nx*1
u: Control signal in numpy array, [[u]] if constant
d: Disturbance signal in numpy array, [[d]] if constant
t_series: Time series
K: State-feedback gain matrix applied as u = K @ x
Returns: Integration results in numpy array
"""
steps = len(t_series)
nx = x0.shape[0]
nu = u.shape[0]
nd = d.shape[0]
Xt = np.zeros((nx, steps))
Ut = np.zeros((nu, steps))
Dt = np.zeros((nd, steps))
if d.shape[1] == 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = K @ x0
x1 = A @ x0 + Bu @ K @ x0 + Bd @ d[:, [0]]
x0 = x1
elif d.shape[1] > 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = K @ x0
x1 = A @ x0 + Bu @ K @ x0 + Bd @ d[:, [ii]]
x0 = x1
else:
print("Disturbance dimensions do not match")
return Xt, Ut, Dt
|
9e77bdd04f03e8b2f20d204188d1d83ca325932c
| 33,197 |
import logging
import pymysql
def get_recordings(mysql, symbol_id):
"""
Parameters
----------
mysql : dict
Connection information
symbol_id : int
ID of a symbol on write-math.com
Returns
-------
list :
A list of HandwrittenData objects
"""
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
# Get the data
recordings = []
sql = ("SELECT `id`, `data`, `is_in_testset`, `wild_point_count`, "
"`missing_line`, `user_id` "
"FROM `wm_raw_draw_data` "
"WHERE `accepted_formula_id` = %s" % str(symbol_id))
cursor.execute(sql)
raw_datasets = cursor.fetchall()
for raw_data in raw_datasets:
try:
handwriting = HandwrittenData(raw_data['data'],
symbol_id,
raw_data['id'],
"no formula in latex",
raw_data['wild_point_count'],
raw_data['missing_line'],
raw_data['user_id'])
recordings.append(handwriting)
except Exception as e:
logging.info("Raw data id: %s", raw_data['id'])
logging.info(e)
return recordings
|
c6e64b51353ae210c67adffd372b45a01847342d
| 33,199 |
def horiLine(lineLength, lineWidth=None, lineCharacter=None, printOut=None):
"""Generate a horizontal line.
Args:
lineLength (int): The length of the line or how many characters the line will have.
lineWidth (int, optional): The width of the line or how many lines of text the line will take space. Defaults to 1.
lineCharacter (str, optional): The string, character, or number the line will use as a single character. Defaults to '-'.
printOut (bool, optional): Print out the generated dummy text. Defaults to False.
Returns:
The horizontal line created.
"""
meaningless_text = ""
lineGenerated = ""
# check if lineWidth is none
if lineWidth is None:
# if lineWidth is none, set it to default of 1
width = 1
else:
# if line wdith is not none, set it to the given value
width = lineWidth
# check if lineCharacter is none
if lineCharacter is None:
# if lineCharacter is none, set it to default "-"
character = "-"
else:
# if line character is not none, then use the user specified character
character = lineCharacter
for i in range(width):
# generate a line
for char in range(lineLength):
lineGenerated += character
if width > 1:
# if line width is greater than 1, append a new line character
lineGenerated += "\n"
meaningless_text += lineGenerated
# check if printOut is not none
if printOut is not None:
# print out is not none and is true so print out the generated text.
if printOut == True:
print(meaningless_text)
# print out is not none and is false so only return the generated text.
return meaningless_text
|
64a4e9e22b480cbe3e038464fe6e0061e023d2c2
| 33,200 |
import struct
import functools
def decrypt(content, salt=None, key=None,
private_key=None, dh=None, auth_secret=None,
keyid=None, keylabel="P-256",
rs=4096, version="aes128gcm"):
"""
Decrypt a data block
:param content: Data to be decrypted
:type content: str
:param salt: Encryption salt
:type salt: str
:param key: local public key
:type key: str
:param private_key: DH private key
:type key: object
:param keyid: Internal key identifier for private key info
:type keyid: str
:param dh: Remote Diffie Hellman sequence (omit for aes128gcm)
:type dh: str
:param rs: Record size
:type rs: int
:param auth_secret: Authorization secret
:type auth_secret: str
:param version: ECE Method version
:type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')
:return: Decrypted message content
:rtype: str
"""
def parse_content_header(content):
"""Parse an aes128gcm content body and extract the header values.
:param content: The encrypted body of the message
:type content: str
"""
id_len = struct.unpack("!B", content[20:21])[0]
return {
"salt": content[:16],
"rs": struct.unpack("!L", content[16:20])[0],
"keyid": content[21:21 + id_len],
"content": content[21 + id_len:],
}
def decrypt_record(key, nonce, counter, content):
decryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv(nonce, counter), tag=content[-TAG_LENGTH:]),
backend=default_backend()
).decryptor()
return decryptor.update(content[:-TAG_LENGTH]) + decryptor.finalize()
def unpad_legacy(data):
pad_size = versions[version]['pad']
pad = functools.reduce(
lambda x, y: x << 8 | y, struct.unpack(
"!" + ("B" * pad_size), data[0:pad_size])
)
if pad_size + pad > len(data) or \
data[pad_size:pad_size+pad] != (b"\x00" * pad):
raise ECEException(u"Bad padding")
return data[pad_size + pad:]
def unpad(data, last):
i = len(data) - 1
for i in range(len(data) - 1, -1, -1):
v = struct.unpack('B', data[i:i+1])[0]
if v != 0:
if not last and v != 1:
raise ECEException(u'record delimiter != 1')
if last and v != 2:
raise ECEException(u'last record delimiter != 2')
return data[0:i]
raise ECEException(u'all zero record plaintext')
if version not in versions:
raise ECEException(u"Invalid version")
overhead = versions[version]['pad']
if version == "aes128gcm":
try:
content_header = parse_content_header(content)
except Exception:
raise ECEException("Could not parse the content header")
salt = content_header['salt']
rs = content_header['rs']
keyid = content_header['keyid']
if private_key is not None and not dh:
dh = keyid
else:
keyid = keyid.decode('utf-8')
content = content_header['content']
overhead += 16
(key_, nonce_) = derive_key("decrypt", version=version,
salt=salt, key=key,
private_key=private_key, dh=dh,
auth_secret=auth_secret,
keyid=keyid, keylabel=keylabel)
if rs <= overhead:
raise ECEException(u"Record size too small")
chunk = rs
if version != "aes128gcm":
chunk += 16 # account for tags in old versions
if len(content) % chunk == 0:
raise ECEException(u"Message truncated")
result = b''
counter = 0
try:
for i in list(range(0, len(content), chunk)):
data = decrypt_record(key_, nonce_, counter, content[i:i + chunk])
if version == 'aes128gcm':
last = (i + chunk) >= len(content)
result += unpad(data, last)
else:
result += unpad_legacy(data)
counter += 1
except InvalidTag as ex:
raise ECEException("Decryption error: {}".format(repr(ex)))
return result
|
e9a993f1a94bac294d14f21b993b9e59d26ae9e7
| 33,201 |
import math
def _rescale_read_counts_if_necessary(n_ref_reads, n_total_reads,
max_allowed_reads):
"""Ensures that n_total_reads <= max_allowed_reads, rescaling if necessary.
This function ensures that n_total_reads <= max_allowed_reads. If
n_total_reads is <= max_allowed_reads, n_ref_reads and n_total_reads are just
returned. However, if n_total_reads > max_allowed_reads, then n_ref_reads and
n_total_reads are rescaled to new values n_ref_reads' and n_total_reads' so
that n_total_reads' == max_allowed_reads and n_ref_reads' / n_total_reads' ~
n_ref_reads / n_total_reads.
Args:
n_ref_reads: int. Number of reference supporting reads.
n_total_reads: int. Total number of reads.
max_allowed_reads: int. The maximum value allowed for n_total after
rescaling, if necessary.
Returns:
New values for n_ref_reads and n_total_reads.
"""
if n_total_reads > max_allowed_reads:
ratio = n_ref_reads / (1.0 * n_total_reads)
n_ref_reads = int(math.ceil(ratio * max_allowed_reads))
n_total_reads = max_allowed_reads
return n_ref_reads, n_total_reads
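# Worked example: _rescale_read_counts_if_necessary(30, 120, 100) -> (25, 100),
# since 30/120 == 0.25 and ceil(0.25 * 100) == 25; counts at or below the cap are returned unchanged.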
|
d09b343cee12f77fa06ab467335a194cf69cccb4
| 33,202 |
def encode_log_entry_to_json(logEntry):
""" Transform the log entry to jason format dict to store into MongoDB """
if logEntry.action == "Query_Success" or "Commit_Success":
# Common fileds for query and commit log entry
json_dict= {"date": logEntry.utcTimestamp,
"action": logEntry.action,
"lampClock": logEntry.lampClock,
"processedTxId": b64encode(logEntry.processedTx.id()),
}
inputAddrKeys = [ b64encode(key) for key in logEntry.inputAddrKeys ]
json_dict.update({"inputAddrKeys" : inputAddrKeys})
inputSigs = [ b64encode(sig) for sig in logEntry.inputSigs]
json_dict.update({"inputSigs" : inputSigs})
inputAddrIds = [{"tx_id": b64encode(addrId.tx_id),
"pos" : addrId.pos
} for addrId in logEntry.processedTx.inTx]
json_dict.update({"inputAddrIds": inputAddrIds})
outputAddrIds = [{"key_id" : b64encode(outAddrId.key_id),
"value": outAddrId.value
} for outAddrId in logEntry.processedTx.outTx ]
json_dict.update({"outputAddrIds": outputAddrIds})
inputTxs = [b64encode(tx) for tx in logEntry.parentTx]
json_dict.update({"inputTxs": inputTxs})
json_dict.update({"processedTx_R": b64encode(logEntry.processedTx.R)})
if logEntry.action == "Commit_Success":
authKeys = [ b64encode(key) for key in logEntry.authKeys ]
json_dict.update({"authKeys" : authKeys})
authSigs = [ b64encode(sig) for sig in logEntry.authSigs ]
json_dict.update({"authSigs" : authSigs})
hashheads = [ b64encode(head) for head in logEntry.hashheads ]
json_dict.update({"hashheads" : hashheads})
seqs = [ int(seq) for seq in logEntry.seqStrs ]
json_dict.update({"seqs" : seqs})
return json_dict
|
4185074194459bb9ba78677a9383d29f92b2b12f
| 33,203 |
def run_backward_rnn(sess, test_idx, test_feat, num_lstm_units):
""" Run backward RNN given a query."""
res_set = []
lstm_state = np.zeros([1, 2 * num_lstm_units])
for test_id in reversed(test_idx):
input_feed = np.reshape(test_feat[test_id], [1, -1])
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='b')
for step in range(10):
curr_score = np.exp(np.dot(lstm_output, np.transpose(test_feat)))
curr_score /= np.sum(curr_score)
next_image = np.argsort(-curr_score)[0][0]
# A small probability threshold (0.001 here) is used to stop the generation,
# i.e. if the prob of end-of-set is larger than the threshold, then stop.
if next_image == test_feat.shape[0] - 1 or curr_score[0][-1] > 0.001:
# print('OVER')
break
else:
input_feed = np.reshape(test_feat[next_image], [1, -1])
[lstm_state, lstm_output] = rnn_one_step(
sess, input_feed, lstm_state, direction='b')
res_set.append(next_image)
return res_set
|
f6c113aed718b23778b75d592ccdcee9210a46bd
| 33,205 |
import functools
def preprocess_xarray(func):
"""Decorate a function to convert all DataArray arguments to pint.Quantities.
This uses the metpy xarray accessors to do the actual conversion.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
args = tuple(a.metpy.unit_array if isinstance(a, xr.DataArray) else a for a in args)
kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
for name, v in kwargs.items()}
return func(*args, **kwargs)
return wrapper
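# Hedged usage sketch -- `some_calc` is a hypothetical function, not part of metpy:
# @preprocess_xarray
# def some_calc(temperature, pressure):
#     ...   # receives pint.Quantity arrays even when called with xarray.DataArray arguments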
|
dc59d7c4b84cff76584c859354c25a77df6ab9b2
| 33,206 |
def _get_verticalalignment(angle, location, side, is_vertical, is_flipped_x,
is_flipped_y):
"""Return vertical alignment along the y axis.
Parameters
----------
angle : {0, 90, -90}
location : {'first', 'last', 'inner', 'outer'}
side : {'first', 'last'}
is_vertical : bool
is_flipped_x : bool
is_flipped_y : bool
Returns
-------
{'baseline', 'top'}
"""
if is_vertical:
if angle == 0:
if is_flipped_y:
return "top"
else:
return "baseline"
elif angle == 90:
if is_flipped_x:
if (location == "last"
or (side == "first" and location == "inner")
or (side == "last" and location == "outer")):
return "baseline"
else:
return "top"
else:
if (location == "first"
or (side == "first" and location == "outer")
or (side == "last" and location == "inner")):
return "baseline"
else:
return "top"
elif angle == -90:
if is_flipped_x:
if (location == "first"
or (side == "first" and location == "outer")
or (side == "last" and location == "inner")):
return "baseline"
else:
return "top"
else:
if (location == "last"
or (side == "first" and location == "inner")
or (side == "last" and location == "outer")):
return "baseline"
else:
return "top"
else:
if angle == 0:
if is_flipped_y:
if (location == "first"
or (side == "first" and location == "outer")
or (side == "last" and location == "inner")):
return "baseline"
else:
return "top"
else:
if (location == "last"
or (side == "first" and location == "inner")
or (side == "last" and location == "outer")):
return "baseline"
else:
return "top"
elif angle == 90:
if is_flipped_x:
return "baseline"
else:
return "top"
elif angle == -90:
if is_flipped_x:
return "top"
else:
return "baseline"
|
6dfceb74bea740f70f0958192b316c43eb9a2ef7
| 33,207 |
def load_dict(path):
""" Load a dictionary and a corresponding reverse dictionary from the given file
where line number (0-indexed) is key and line string is value. """
retdict = list()
rev_retdict = dict()
with open(path) as fin:
for idx, line in enumerate(fin):
text = line.strip()
retdict.append(text)
rev_retdict[text] = idx
return retdict, rev_retdict
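# Hedged usage sketch -- "vocab.txt" is a hypothetical file whose lines are "apple" and "banana":
# words, word_to_idx = load_dict("vocab.txt")
# words[0] == "apple" and word_to_idx["banana"] == 1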
|
31a67c2a28518a3632a47ced2889150c2ce98a78
| 33,210 |
import numpy
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent to
`split` with ``axis=1``, the array is always split along the second axis
regardless of the array dimension.
See Also:
split : Split an array into multiple sub-arrays of equal size.
Examples:
>>> poly = numpoly.monomial(8).reshape(2, 4)
>>> poly
polynomial([[1, q, q**2, q**3],
[q**4, q**5, q**6, q**7]])
>>> part1, part2 = numpoly.hsplit(poly, 2)
>>> part1
polynomial([[1, q],
[q**4, q**5]])
>>> part2
polynomial([[q**2, q**3],
[q**6, q**7]])
>>> part1, part2, part3 = numpoly.hsplit(poly, [1, 2])
>>> part1
polynomial([[1],
[q**4]])
>>> part3
polynomial([[q**2, q**3],
[q**6, q**7]])
"""
ary = numpoly.aspolynomial(ary)
results = numpy.hsplit(ary.values, indices_or_sections=indices_or_sections)
return [numpoly.aspolynomial(result, names=ary.indeterminants)
for result in results]
|
cd04cbeb3ac89910289d6f1ddc1809373f896ac6
| 33,211 |
def is_valid_ipv6_addr(input=""):
"""Check if this is a valid IPv6 string.
Returns
-------
bool
A boolean indicating whether this is a valid IPv6 string
"""
assert input != ""
if _RGX_IPV6ADDR.search(input):
return True
return False
|
f866aa5e8e005823ec78edcc9c7dedd923c28c4f
| 33,212 |
def calc_TEC(
maindir,
window=4096,
incoh_int=100,
sfactor=4,
offset=0.0,
timewin=[0, 0],
snrmin=0.0,
):
"""
Estimation of phase curve using coherent and incoherent integration.
Args:
maindir (:obj:`str`): Path for data.
window (:obj:'int'): Window length in samples.
incoh_int (:obj:'int'): Number of incoherent integrations.
sfactor (:obj:'int'): Overlap factor.
offset (:obj:'float'): Offset passed to the ephemeris/Doppler calculation.
timewin (:obj:'list'): Seconds trimmed from the start and end of the data, [start, end].
snrmin (:obj:'float'): Minimum SNR; time points below it in either channel are discarded.
Returns:
outdict (dict[str, obj]): Output data dictionary::
{
"rTEC": Relative TEC in TECU,
"rTEC_sig":Relative TEC STD in TECU,
"S4": The S4 parameter,
"snr0":snr0,
"snr1":snr1,
"time": Time for each measurement in posix format,
}
"""
e = ephem_doponly(maindir, offset)
resid = calc_resid(maindir, e)
Nr = int((incoh_int + sfactor - 1) * (window / sfactor))
drfObj, chandict, start_indx, end_indx = open_file(maindir)
chans = list(chandict.keys())
sps = chandict[chans[0]]["sps"]
start_indx = start_indx + timewin[0] * sps
end_indx = end_indx - timewin[1] * sps
freq_ratio = chandict[chans[1]]["fo"] / chandict[chans[0]]["fo"]
om0, om1 = (
2.0
* s_const.pi
* sp.array([chandict[chans[0]]["fo"], chandict[chans[1]]["fo"]])
)
start_vec = sp.arange(start_indx, end_indx - Nr, Nr, dtype=float)
tvec = start_vec / sps
soff = window / sfactor
toff = soff / sps
idx = sp.arange(window)
n_t1 = sp.arange(0, incoh_int) * soff
IDX, N_t1 = sp.meshgrid(idx, n_t1)
Msamp = IDX + N_t1
ls_samp = float(Msamp.flatten()[-1])
wfun = sig.get_window("hann", window)
wmat = sp.tile(wfun[sp.newaxis, :], (incoh_int, 1))
phase_00 = sp.exp(1.0j * 0.0)
phase_10 = sp.exp(1.0j * 0.0)
phase0 = sp.zeros(len(start_vec), dtype=sp.complex64)
phase1 = sp.zeros(len(start_vec), dtype=sp.complex64)
phase_cs0 = sp.zeros(len(start_vec), dtype=float)
phase_cs1 = sp.zeros(len(start_vec), dtype=float)
snr0 = sp.zeros(len(start_vec))
snr1 = sp.zeros(len(start_vec))
std0 = sp.zeros(len(start_vec))
std1 = sp.zeros(len(start_vec))
fi = window // 2
subchan = 0
outspec0 = sp.zeros((len(tvec), window))
outspec1 = sp.zeros((len(tvec), window))
print("Start Beacon Processing")
for i_t, c_st in enumerate(start_vec):
update_progress(float(i_t) / float(len(start_vec)))
t_cur = tvec[i_t]
z00 = drfObj.read_vector(c_st, Nr, chans[0], subchan)[Msamp]
z01 = drfObj.read_vector(c_st + soff, Nr, chans[0], subchan)[Msamp]
z10 = drfObj.read_vector(c_st, Nr, chans[1], subchan)[Msamp]
z11 = drfObj.read_vector(c_st + soff, Nr, chans[1], subchan)[Msamp]
tphase = sp.float64(t_cur + toff)
doppler0 = -1.0 * (150.0 / 400.0) * resid["doppler_residual"](t_cur) - e[
"dop1"
](tphase)
doppler1 = -1.0 * resid["doppler_residual"](t_cur) - e["dop2"](tphase)
osc00 = phase_00 * wmat * sp.exp(1.0j * 2.0 * sp.pi * doppler0 * (Msamp / sps))
osc01 = (
phase_00
* wmat
* sp.exp(1.0j * 2.0 * sp.pi * doppler0 * (Msamp / sps + float(soff) / sps))
)
osc10 = phase_10 * wmat * sp.exp(1.0j * 2.0 * sp.pi * doppler1 * (Msamp / sps))
osc11 = (
phase_10
* wmat
* sp.exp(1.0j * 2.0 * sp.pi * doppler1 * (Msamp / sps + float(soff) / sps))
)
f00 = scfft.fftshift(scfft.fft(z00 * osc00.astype(z00.dtype), axis=-1), axes=-1)
f01 = scfft.fftshift(scfft.fft(z01 * osc01.astype(z01.dtype), axis=-1), axes=-1)
f00spec = sp.power(f00.real, 2).sum(0) + sp.power(f00.imag, 2).sum(0)
outspec0[i_t] = f00spec.real
f00_cor = f00[:, fi] * sp.conj(f01[:, fi])
# Use prod to average the phases together.
phase0[i_t] = sp.cumprod(sp.power(f00_cor, 1.0 / float(incoh_int)))[-1]
phase_cs0[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f00[:, fi]))))[-1]
f10 = scfft.fftshift(scfft.fft(z10 * osc10.astype(z10.dtype), axis=-1), axes=-1)
f11 = scfft.fftshift(scfft.fft(z11 * osc11.astype(z11.dtype), axis=-1), axes=-1)
f10spec = sp.power(f10.real, 2).sum(0) + sp.power(f10.imag, 2).sum(0)
f10_cor = f10[:, fi] * sp.conj(f11[:, fi])
outspec1[i_t] = f10spec.real
phase1[i_t] = sp.cumprod(sp.power(f10_cor, 1.0 / float(incoh_int)))[-1]
phase_cs1[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f10[:, fi]))))[-1]
std0[i_t] = sp.std(sp.angle(f00_cor))
std1[i_t] = sp.std(sp.angle(f10_cor))
snr0[i_t] = f00spec.real[fi] / sp.median(f00spec.real)
snr1[i_t] = f10spec.real[fi] / sp.median(f10spec.real)
# Phases for next time through the loop
phase_00 = phase_00 * sp.exp(
1.0j * 2.0 * sp.pi * doppler0 * ((ls_samp + 1.0) / sps)
)
phase_10 = phase_10 * sp.exp(
1.0j * 2.0 * sp.pi * doppler1 * ((ls_samp + 1.0) / sps)
)
#
phasecurve = sp.cumsum(sp.angle(phase0) * freq_ratio - sp.angle(phase1))
phasecurve_amp = phase_cs0 * freq_ratio - phase_cs1
stdcurve = sp.sqrt(
sp.cumsum(float(sfactor) * incoh_int * (std0 ** 2.0 + std1 ** 2.0))
)
# SNR windowing, picking values with minimum snr
snrwin = sp.logical_and(snr0 > snrmin, snr1 > snrmin)
phasecurve = phasecurve[snrwin]
phasecurve_amp = phasecurve_amp[snrwin]
stdcurve = stdcurve[snrwin]
snr0 = snr0[snrwin]
snr1 = snr1[snrwin]
tvec = tvec[snrwin]
dt = sp.diff(tvec).mean()
Nside = int(1.0 / dt / 2.0)
lvec = sp.arange(-Nside, Nside)
Lmat, Tmat = sp.meshgrid(lvec, sp.arange(len(tvec)))
Sampmat = Lmat + Tmat
Sampclip = sp.clip(Sampmat, 0, len(tvec) - 1)
eps = s_const.e ** 2 / (8.0 * s_const.pi ** 2 * s_const.m_e * s_const.epsilon_0)
aconst = s_const.e ** 2 / (2 * s_const.m_e * s_const.epsilon_0 * s_const.c)
na = 9.0
nb = 24.0
f0 = 16.668e6
# cTEC = f0*((na*nb**2)/(na**2-nb**2))*s_const.c/(2.*s_const.pi*eps)
cTEC = 1e-16 * sp.power(om1 / om0 ** 2 - 1.0 / om1, -1) / aconst
rTEC = cTEC * phasecurve
rTEC = rTEC - rTEC.min()
rTEC_amp = cTEC * phasecurve_amp
rTEC_amp = rTEC_amp - rTEC_amp.min()
rTEC_sig = cTEC * stdcurve
S4 = sp.std(snr0[Sampclip], axis=-1) / sp.median(snr0, axis=-1)
outdict = {
"rTEC": rTEC,
"rTEC_amp": rTEC_amp,
"rTEC_sig": rTEC_sig,
"S4": S4,
"snr0": snr0,
"snr1": snr1,
"time": tvec,
"resid": resid,
"phase": phasecurve,
"phase_amp": phasecurve_amp,
"phasestd": stdcurve,
"outspec0": outspec0,
"outspec1": outspec1,
}
return outdict
|
f2af0a58d866b79de320e076e3ecc5ae3e704cad
| 33,215 |
def solve(A, b):
"""solve a sparse system Ax = b
Args:
A (torch.sparse.Tensor[b, m, m]): the sparse matrix defining the system.
b (torch.Tensor[b, m, n]): the target matrix b
Returns:
x (torch.Tensor[b, m, n]): the initially unknown matrix x
Note:
'A' should be 'dense' in the first dimension, i.e. the batch dimension
should contain as many elements as the batch size.
'A' should have the same sparsity pattern for every element in the batch.
If this is not the case, you have two options:
1. Create a new sparse matrix with the same sparsity pattern for every element in the batch by adding zeros to the sparse representation.
2. OR loop over the batch dimension and solve sequentially, i.e., with shapes (1, m, m) and (1, m, n) for each element in 'A' and 'b' respectively.
"""
return Solve.apply(A, b)
|
64a51eb8b1bd52ea3c47b80363b68ec346260ac9
| 33,216 |
def smiles_to_fp(smiles):
"""
Convert SMILES to a MACCS keys fingerprint.
Parameters
----------
smiles : str, smiles representation of a molecule
Returns
-------
fp1 : list of 0/1 ints for the MACCS keys bits (NaNs if the SMILES cannot be parsed)
"""
try:
mol = Chem.MolFromSmiles(smiles)
fp1 = MACCSkeys.GenMACCSKeys(mol)
fp1 = np.fromstring(fp1.ToBitString(), 'int8') - 48
fp1 = fp1.tolist()
except:
fp1 = [np.nan for dummy_idx in range(166)]
return fp1
|
e64b3b94ccf950af41473800e992720b7dd6b155
| 33,218 |
import re
from ipaddress import IPv4Interface
from subprocess import check_output
def _networkinfo(interface):
"""Given an interface name, returns dict containing network and
broadcast address as IPv4Interface objects
If an interface has no IP, returns None
"""
ipcmds = "ip -o -4 address show dev {}".format(interface).split()
out = check_output(ipcmds).decode('utf-8')
nwmatch = re.search(r"inet (\d+\.\d+\.\d+\.\d+/\d+)", out)
if nwmatch is None:
return None
nw = IPv4Interface(nwmatch.groups()[0])
bcastaddrmatch = re.search(r"brd (\d+\.\d+\.\d+\.\d+)", out)
if bcastaddrmatch is None:
return None
bcastaddr = IPv4Interface(bcastaddrmatch.groups()[0])
return dict(network=nw, bcastaddr=bcastaddr)
|
51ab39e2f05f1dad8d02272c425a357750781414
| 33,219 |
def score_decorator(f):
"""Decorator for sklearn's _score function.
Special `hack` for sklearn.model_selection._validation._score
in order to score pipelines that drop samples during transforming.
"""
def wrapper(*args, **kwargs):
args = list(args) # Convert to list for item assignment
if len(args[0]) > 1: # Has transformers
args[1], args[2] = args[0][:-1].transform(args[1], args[2])
# Return f(final_estimator, X_transformed, y_transformed, ...)
return f(args[0][-1], *tuple(args[1:]), **kwargs)
return wrapper
|
ee3464cb596846f3047fa04a6d40f5bec9634077
| 33,222 |
def trait(name, notify=True, optional=False):
""" Create a new expression for observing a trait with the exact
name given.
Events emitted (if any) will be instances of
:class:`~traits.observation.events.TraitChangeEvent`.
Parameters
----------
name : str
Name of the trait to match.
notify : bool, optional
Whether to notify for changes. Default is to notify.
optional : bool, optional
If true, skip this observer if the requested trait is not found.
Default is false, and an error will be raised if the requested
trait is not found.
Returns
-------
new_expression : ObserverExpression
"""
observer = NamedTraitObserver(
name=name, notify=notify, optional=optional)
return SingleObserverExpression(observer)
|
ecd6b0525efadea6e0a7bc2b57ebaf99f2463cca
| 33,223 |
def get_splits(space,
data,
props,
max_specs=None,
seed=None,
fp_type="morgan"):
"""
Get representations and values of the data given a certain
set of Morgan hyperparameters.
Args:
space (dict): `hyperopt` space of hyperparameters
data (dict): dictionary with data for each split
props (list[str]): properties you'll want to predict with the model.
max_specs (int, optional): maximum number of species to use in hyperopt
seed (int, optional): seed to use if we take a subsample of the data
fp_type (str, optional): type of fingerprint to use
Returns:
xy_dic (dict): dictionary of the form {split: [x, y]} for each split,
where x and y are arrays of the input and output.
"""
# get fingerprint arguments
if fp_type == "morgan":
fp_hyper_keys = MORGAN_HYPER_KEYS
elif fp_type == "atom_pair":
fp_hyper_keys = PAIR_FP_HYPER_KEYS
fp_kwargs = {key: val for key, val in space.items()
if key in fp_hyper_keys}
# sample species
sample_data = make_sample_data(max_specs=max_specs,
data=data,
props=props,
seed=seed)
xy_dic = {}
for name in ["train", "val", "test"]:
x, y = make_mol_rep(data=sample_data,
splits=[name],
props=props,
fp_type=fp_type,
fp_kwargs=fp_kwargs)
xy_dic[name] = [x, y]
return xy_dic
|
9ff20a62b7615d3f7b1b3a8ede1c1d4f6ff5bccf
| 33,224 |
def amortized_loan(principal, apr, periods, m=12):
"""
"""
return principal / pvifa(apr, periods, m)
|
f7d67683cd625179012a24e6cd62c0c552009a48
| 33,226 |
from typing import Optional
from typing import Iterable
def horizontal_legend(
fig: Figure,
handles: Optional[Iterable[Artist]] = None,
labels: Optional[Iterable[str]] = None,
*,
ncol: int = 1,
**kwargs,
) -> Legend:
"""
Place a legend on the figure, with the items arranged to read right to left rather than top to bottom.
:param fig: The figure to plot the legend on.
:param handles: Legend handles; if both ``handles`` and ``labels`` are ``None``, they are taken from the figure's first axes.
:param labels: Legend labels corresponding to ``handles``.
:param ncol: The number of columns in the legend.
:param kwargs: Additional keyword arguments passed to :meth:`matplotlib.figure.Figure.legend`.
"""
if handles is None and labels is None:
handles, labels = fig.axes[0].get_legend_handles_labels()
# Rearrange legend items to read right to left rather than top to bottom.
if handles:
handles = list(filter(None, transpose(handles, ncol)))
if labels:
labels = list(filter(None, transpose(labels, ncol)))
return fig.legend(handles, labels, ncol=ncol, **kwargs)
|
ea229c4deee241c37b3c26a5ab2fab4d2233b8dd
| 33,228 |
def _create_lock_inventory(session, rp_uuid, inventories):
"""Return a function that will lock inventory for this rp."""
def _lock_inventory():
rp_url = '/resource_providers/%s' % rp_uuid
inv_url = rp_url + '/' + 'inventories'
resp = session.get(rp_url)
if resp:
data = resp.json()
generation = data['generation']
else:
_print('failed to lock inventory, no rp')
return False
inventories['VCPU']['reserved'] = inventories['VCPU']['total']
data = {
'inventories': inventories,
'resource_provider_generation': generation,
}
resp = session.put(inv_url, json=data)
if resp:
_print('locking inventory by reserving VCPU')
return True
else:
_print('failed to lock inventory, no write inv')
return False
return _lock_inventory
|
e9988b989cc22f82110923466906aa64a515c41f
| 33,229 |
def strip(string, p=" \t\n\r"):
"""
strip(string, p=" \t\n\r")
"""
return string.strip(p)
|
1be2a256394455ea235b675d51d2023e8142415d
| 33,230 |
def correlation_matrix_plot(
df, function=pearsonr, significance_level=0.05, cbar_levels=8, figsize=(6, 6)
):
"""Plot corrmat considering p-vals."""
corr, pvals = correlation_matrix(df, function=function)
# create triangular mask for heatmap
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
# mask corrs based on p-values
pvals_plot = np.where(pvals > significance_level, np.nan, corr)
# plot
# -------------------------------------------------------------------------
# define correct cbar height and pass to sns.heatmap function
fig, ax = plt.subplots(figsize=figsize)
cbar_kws = {"fraction": 0.046, "pad": 0.04}
sns.heatmap(
corr,
mask=mask,
cmap=sns.diverging_palette(20, 220, n=cbar_levels),
square=True,
vmin=-1,
center=0,
vmax=1,
annot=pvals_plot,
cbar_kws=cbar_kws,
)
title = str(function).split(" ")[1]
plt.title("{}, p < {:.4f}".format(title, significance_level))
plt.tight_layout()
return fig, ax
|
343aa7c0240be34da037ea45bd0020d6eed83770
| 33,232 |
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
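# Shape sketch: for vecA of shape (N, m*m) the result has shape (N, m, m);
# e.g. an input of shape (5, 9) comes back as (5, 3, 3), reshaped in Fortran (column-major) order.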
|
766dd244016691cc2f29d0af383034116e067401
| 33,233 |
import logging
import numpy as np
def one_hot_encode(sequences):
"""One hot encoding of a list of DNA sequences
Args:
sequences (list):: python list of strings of equal length
Returns:
numpy.ndarray: 3-dimension numpy array with shape
(len(sequences), len(list_item), 4)
"""
if len(sequences) == 0:
logging.error("'sequences' is empty")
# make sure all sequences are of equal length
seq_len = len(sequences[0])
for sequence in sequences:
if len(sequence) != seq_len:
logging.error("Incompatible sequence lengths in batch. " +
"All sequences should have the same length.")
return None
# Step 1. convert sequence list into a single string
_sequences = ''.join(sequences)
# Step 2. translate the alphabet to a string of digits
transtab = str.maketrans('ACGTN', '01234')
sequences_trans = _sequences.translate(transtab)
# Step 3. convert to list of ints
int_sequences = list(map(int, sequences_trans))
# Step 4. one hot encode using int_sequences to index
# into an 'encoder' array
encoder = np.vstack([np.eye(4), np.zeros(4)])
X = encoder[int_sequences]
# Step 5. reshape
return X.reshape(len(sequences), len(sequences[0]), 4)
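# Worked example: one_hot_encode(["ACGT"]) has shape (1, 4, 4) and equals the 4x4 identity
# matrix for that single sequence; any "N" character maps to an all-zero row.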
|
3ccb69c433872968510065e2ce86b69558767cf8
| 33,234 |
def binary_cross_entropy(labels, logits, linear_input=True, eps=1.e-5, name='binary_cross_entropy_loss'):
"""
Same as cross_entropy_loss for the binary classification problem. the model should have a one dimensional output,
the targets should be given in form of a matrix of dimensions batch_size x 1 with values in [0,1].
:param labels:
:param logits: sigmoid or linear output of the model
:param linear_input: (default: True) is y is linear in which case tf.nn.sigmoid will be applied to y
:param eps: (optional, default 1.e-5) clipping value for log.
:param name: (optional, default binary_cross_entropy_loss) name scope for the defined operations.
:return: tensor for the cross_entropy_loss (WITHOUT MEAN ON THE EXAMPLES)
"""
with tf.name_scope(name):
sigmoid_out = tf.nn.sigmoid(logits)[:, 0] if linear_input else logits
# tgs = targets if len(targets.)
return - (labels * tf.log(tf.clip_by_value(sigmoid_out, eps, 1. - eps)) +
(1. - labels) * tf.log(tf.clip_by_value(1. - sigmoid_out, eps, 1. - eps)))
|
60c29e67144e91ac384016642322eafe34d70984
| 33,235 |
def get_number_from_user():
""" None -> (int)
Get a symbol by index from the user's input.
"""
movers = ApiWrapper.get_movers()
while True:
print("To choose a company, enter a responding integer from the list below")
print_movers(movers)
y = input("Enter the number of company: ")
if y.isdigit() and 0 < int(y) <= len(movers):
return movers[int(y)-1]
else:
print("You entered the wrong value ", y)
|
f04d312ba115bd601e9d0d6bfde7f614359ba13d
| 33,236 |
def read_refseqscan_results(fn):
"""Read RefSeqScan output file"""
ret = dict()
for line in open(fn):
line = line.strip()
if line == '' or line.startswith('#'):
continue
cols = line.split()
ret[cols[0]] = cols[2]
return ret
|
4450e4113d47e72c5332dd1ca79b37a6847a296f
| 33,238 |
from numpy import array, empty_like  # numpy array helpers; the stdlib 'array' module was a mis-detected import
def discretize_categories(iterable):
"""
:param iterable: sequence of category labels (strings or numbers)
:return: numpy array of the values, with string categories mapped to integer codes
"""
uniques = sorted(set(iterable))
discretize = False
for v in uniques:
if isinstance(v, str):
discretize = True
if discretize: # Discretize and return an array
str_to_int_map = {}
for i, v in enumerate(uniques):
str_to_int_map[v] = i
ints = empty_like(iterable, dtype=int)
for i, v in enumerate(iterable):
ints[i] = str_to_int_map[v]
return ints
else: # Do nothing and return as an array
return array(iterable)
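# Worked example: discretize_categories(["b", "a", "b"]) -> array([1, 0, 1])
# (string labels are mapped to their rank in sorted order), while
# discretize_categories([3, 1, 2]) is returned unchanged as array([3, 1, 2]).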
|
a55bb0cc8632274d15f00478783e3def3f4ebd49
| 33,239 |
def multhist(hists, asone=1):
"""Takes a set of histograms and combines them.
If asone is true, then returns one histogram of key->[val1, val2, ...].
Otherwise, returns one histogram per input"""
ret = {}
num = len(hists)
for i, h in enumerate(hists):
for k in sorted(h):
if k not in ret:
ret[k] = [0]*num
ret[k][i] = h[k]
if asone: return ret
# otherwise, convert to separate ones
toret = []
for i in hists:
toret.append({})
for k, vals in ret.items():
for i, v in enumerate(vals):
toret[i][k] = v
return toret
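# Worked example:
# multhist([{'a': 1}, {'a': 2, 'b': 3}])          -> {'a': [1, 2], 'b': [0, 3]}
# multhist([{'a': 1}, {'a': 2, 'b': 3}], asone=0) -> [{'a': 1, 'b': 0}, {'a': 2, 'b': 3}]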
|
0bb6b0af90e75fcfb4c2bee698a123c897bcb64c
| 33,240 |
def to_dict(obj, table=None, scrub=None, fields=None):
"""
Takes a single or list of sqlalchemy objects and serializes to
JSON-compatible base python objects. If scrub is set to True, then
this function will also remove all keys that match the specified list
"""
data = None
serialize_obj = serialize_object if table is None else (
lambda o: serialize_result_row(o, table))
if obj is not None:
try:
_ = iter(obj)
except TypeError:
# not iterable
data = serialize_obj(obj)
if scrub:
data = scrub_dict(data, scrub)
if fields is not None:
data = filter_dict(data, fields)
else:
data = [serialize_obj(o) for o in obj]
if scrub:
data = [scrub_dict(d, scrub) for d in data]
if fields is not None:
data = [filter_dict(d, fields) for d in data]
return data
|
270c22f8a096d65024f0ddc5699ba21155b7c2ef
| 33,241 |
import torch
from torch.autograd import Variable
def CWLoss(output, target, confidence=0):
"""
    CW loss (margin loss).
"""
num_classes = output.shape[-1]
target = target.data
target_onehot = torch.zeros(target.size() + (num_classes,))
target_onehot = target_onehot.cuda()
target_onehot.scatter_(1, target.unsqueeze(1), 1.)
target_var = Variable(target_onehot, requires_grad=False)
real = (target_var * output).sum(1)
other = ((1. - target_var) * output - target_var * 10000.).max(1)[0]
loss = - torch.clamp(real - other + confidence, min=0.)
loss = torch.sum(loss)
return loss
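# Usage sketch (assumes CWLoss and the torch imports above). The tensor shapes
# are illustrative; a CUDA device is required because the function moves the
# one-hot target to the GPU.
output = torch.randn(4, 10, device='cuda')          # 4 samples, 10 classes
target = torch.randint(0, 10, (4,), device='cuda')  # ground-truth class indices
loss = CWLoss(output, target, confidence=0)         # scalar margin loss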
|
6f9a61dcb1b2377e4e76fa71d0de86ee12647f17
| 33,242 |
def get_square(array, size, y, x, position=False, force=False, verbose=True):
"""
Return an square subframe from a 2d array or image.
Parameters
----------
array : 2d array_like
Input frame.
size : int
Size of the subframe.
y : int
Y coordinate of the center of the subframe (obtained with the function
``frame_center``).
x : int
X coordinate of the center of the subframe (obtained with the function
``frame_center``).
position : bool, optional
If set to True return also the coordinates of the bottom-left vertex.
force : bool, optional
Size and the size of the 2d array must be both even or odd. With
``force`` set to True this condition can be avoided.
    verbose : bool, optional
If True, warning messages might be shown.
Returns
-------
array_out : array_like
Sub array.
y0, x0 : int
[position=True] Coordinates of the bottom-left vertex.
"""
size_init = array.shape[0] # assuming square frames
if array.ndim != 2:
raise TypeError('Input array is not a 2d array.')
if not isinstance(size, int):
raise TypeError('`Size` must be integer')
if size >= size_init: # assuming square frames
msg = "`Size` is equal to or bigger than the initial frame size"
raise ValueError(msg)
if not force:
# Even input size
if size_init % 2 == 0:
# Odd size
if size % 2 != 0:
size += 1
if verbose:
print("`Size` is odd (while input frame size is even). "
"Setting `size` to {} pixels".format(size))
# Odd input size
else:
# Even size
if size % 2 == 0:
size += 1
if verbose:
print("`Size` is even (while input frame size is odd). "
"Setting `size` to {} pixels".format(size))
else:
# Even input size
if size_init % 2 == 0:
# Odd size
if size % 2 != 0 and verbose:
print("WARNING: `size` is odd while input frame size is even. "
"Make sure the center coordinates are set properly")
# Odd input size
else:
# Even size
if size % 2 == 0 and verbose:
print("WARNING: `size` is even while input frame size is odd. "
"Make sure the center coordinates are set properly")
# wing is added to the sides of the subframe center
wing = (size - 1) / 2
y0 = int(y - wing)
y1 = int(y + wing + 1) # +1 cause endpoint is excluded when slicing
x0 = int(x - wing)
x1 = int(x + wing + 1)
if y0 < 0 or x0 < 0 or y1 > size_init or x1 > size_init:
# assuming square frames
raise RuntimeError('square cannot be obtained with size={}, y={}, x={}'
''.format(size, y, x))
array_out = array[y0: y1, x0: x1].copy()
if position:
return array_out, y0, x0
else:
return array_out
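# Usage sketch (assumes get_square and numpy as np): extract a centered 3x3
# patch from a 5x5 frame; with position=True the bottom-left vertex is
# returned as well.
frame = np.arange(25).reshape(5, 5)
patch, y0, x0 = get_square(frame, 3, 2, 2, position=True)
# patch equals frame[1:4, 1:4]; y0 == 1 and x0 == 1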
|
8d83d4d16241e118bbb65593c14f9f9d5ae0834c
| 33,243 |
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
jaccard = np.divide(inter_len, union_len)
return jaccard
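# Usage sketch (assumes iou_with_anchors and numpy as np): IoU of two 1-D
# anchors against a single box spanning [0, 10].
anchors_min = np.array([0.0, 5.0])
anchors_max = np.array([10.0, 15.0])
iou = iou_with_anchors(anchors_min, anchors_max, 0.0, 10.0)
# iou -> array([1.0, 0.333...]): the first anchor matches the box exactly,
# the second overlaps by 5 out of a 15-long union.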
|
f3c3a10b8d86bb25ca851998de275a7c5fb8fbec
| 33,244 |
def load_from_np(filename, arr_idx_der):
"""
    arr_idx_der : column index of the quantity to load (1 for rho, 2 for p)
"""
# load npy data of 3D tube
arr = np.load(filename)
arr_t = arr[:, 0]
arr_der = arr[:, arr_idx_der]
return arr_t, arr_der
|
77b64fdf067cf70a6861d75b60c7ca63bbf21de0
| 33,245 |
def rxzero_vel_amp_eval(parm, t_idx):
"""
siglognormal velocity amplitude evaluation
"""
if len(parm) == 6:
D, t0, mu, sigma, theta_s, theta_e = parm
elif len(parm) == 4:
D, t0, mu, sigma = parm
else:
        print('Invalid length of parm...')
return None
#argument for the sig log normal function
t_minus_t0 = t_idx - t0
#truncation to keep time larger than 0
thres = 1e-6
#regularize sig
sigma = thres if sigma < thres else sigma
#t_minus_t0[t_minus_t0 < thres] = thres
res = np.zeros(len(t_idx))
res[t_minus_t0 < thres] = 0.0
res[t_minus_t0 >= thres] = D / (sigma * np.sqrt(2 * np.pi) * (t_minus_t0[t_minus_t0 >= thres]+1e-5)) * np.exp((np.log(t_minus_t0[t_minus_t0 >= thres]) - mu)**2/(-2*sigma**2))
return res
|
acc1c102723d276db6603104ad8ad3848f72b7f8
| 33,246 |
import json
def read_json(json_file):
""" Read input JSON file and return the dict. """
json_data = None
with open(json_file, 'rt') as json_fh:
json_data = json.load(json_fh)
return json_data
|
4e1ea153d040ec0c3478c2d1d3136eb3c48bfe1c
| 33,248 |
def mixnet_xl(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""Creates a MixNet Extra-Large model.
Not a paper spec, experimental def by RW w/ depth scaling.
"""
default_cfg = default_cfgs['mixnet_xl']
#kwargs['drop_connect_rate'] = 0.2
model = _gen_mixnet_m(
channel_multiplier=1.6, depth_multiplier=1.2, num_classes=num_classes, in_chans=in_chans, **kwargs)
model.default_cfg = default_cfg
return model
|
e03c12abb4bb2cb43553cecd6b175cf6724cc8c0
| 33,249 |
def accuracy(results):
"""
Evaluate the accuracy of results, considering victories and defeats.
Args:
results: List of 2 elements representing the number of victories and defeats
Returns:
results accuracy
"""
return results[1] / (results[0] + results[1]) * 100
|
911e38741b7c02772c23dd6a347db36b96a0e7e0
| 33,250 |
def sub_sellos_agregar():
"""
    Add a new record to 'sub_sellos'
"""
form = SQLFORM(db.sub_sellos, submit_button='Aceptar')
if form.accepts(request.vars, session):
response.flash = 'Registro ingresado'
return dict(form=form)
|
8c9ad3c3648bda12f5259b164886a0e6ce822d11
| 33,251 |
async def ensure_valid_path(current_path: str) -> bool:
""" ensures the path is configured to allow auto refresh """
paths_to_check = settings.NO_AUTO_REFRESH
for path in paths_to_check:
if path in current_path:
return False
return True
|
a39c46e0df1db2ac93afbb063e16a3c52cb378eb
| 33,252 |
from typing import List
def part_1_original_approach(lines: List[str]) -> int:
"""
This was my original approach. I missed a few critical details that really bit me in
the ass.
1. Operator precedence for modulo.
"""
ans = 0
seq = [[int(c) for c in x] for x in lines]
for _ in range(100):
ns = [[(a + 1) % 10 for a in r] for r in seq]
to_flash = {
(r, c) for r in range(len(seq)) for c in range(len(seq[0])) if ns[r][c] == 0
}
flashed = set()
while to_flash:
f = to_flash.pop()
if f in flashed:
continue
flashed.add(f)
to_inc = set()
for r, c in grid_adjs(
f, ((0, len(seq)), (0, len(seq[0]))), AdjacenciesType.ALL
):
to_inc.add((r, c))
for r, c in to_inc:
# Here was my operator precedence error. I originally had
# ns[r][c] = 0 if ns[r][c] == 0 else ns[r][c] + 1 % 10
# which is wrong since % binds tighter than +
ns[r][c] = 0 if ns[r][c] == 0 else (ns[r][c] + 1) % 10
if ns[r][c] == 0:
to_flash.add((r, c))
ans += sum(1 for r in ns for c in r if c == 0)
seq = ns
return ans
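# A minimal illustration of the precedence pitfall described in the docstring:
# % binds tighter than +, so the parentheses around (value + 1) matter.
value = 9
wrong = value + 1 % 10    # == 10, parses as value + (1 % 10)
right = (value + 1) % 10  # == 0, the intended wrap-around to zero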
|
b8fbf734c0a476244cb8f999afa961c8ec0ff737
| 33,254 |
def get_distances(username,location,dist):
"""
    Calculate the distances between the given location and the rooms created by other users,
    and return the creators whose rooms lie within dist.
"""
distances=[]
keys = ['_id','dist']
base=list(nego.find({},{'location':0})) ## Retrieves every user in the base except location
for d in base:
d['dist']=distance_calc(location,d['payload']['location']['val'][0]) ## Calculates distance based on the name of the user and every user
d.pop('location',None)
#d=dict_flatten(d)
dict2 = {x:d[x] for x in keys}
dict2['created_by']=d['payload']['created_by']['val'][0]
distances.append(dict2)
all_data_filt=[x for x in distances if float(x['dist'])<=float(dist) and x['created_by']!=username]
filt_id = list(map(lambda x: x['created_by'], all_data_filt)) #returns the id rather than the usernames for those whose are electible
return filt_id,all_data_filt
|
41c3caf26c46d227367da813fb8e2a0d059e6500
| 33,256 |
import pickle
def AcProgEgrep(context, grep, selection=None):
"""Corresponds to AC_PROG_EGREP_ autoconf macro
:Parameters:
context
SCons configuration context.
grep
Path to ``grep`` program as found by `AcProgGrep`.
selection
If ``None`` (default), the program will be found automatically,
otherwise the method will return the value of **selection**.
.. _AC_PROG_EGREP: http://www.gnu.org/software/autoconf/manual/autoconf.html#index-AC_005fPROG_005fEGREP-262
"""
context.Display("Checking for egrep... ")
context.sconf.cached = 1
action = _ProgGrep(grep, ['-E', '(a|b)'], 'a\n', ['egrep'], ['EGREP$'],'EGREP')
action = _ActionWrapper(action)
args = pickle.dumps({'grep' : grep, 'selection' : selection})
stat, out = context.TryAction(action, args, '.arg')
if stat and out:
out = pickle.loads(out)
context.Result(str(out))
return out
else:
context.Result('not found')
return None
|
d8f8ed373950ef4a9b6aaa2790a988f5aacd9730
| 33,257 |
def is_number(s):
"""Is string a number."""
try:
float(s)
return True
except ValueError:
return False
|
22eb560c2723f6551d1a445ba777208a75139a7c
| 33,258 |
import numpy
def calc_motif_dist(motifList):
"""Given a list of motifs, returns a dictionary of the distances
for each motif pair, e.g. {Motif1:Motif2:(dist, offset, sense/antisense)}
"""
ret = {}
for m1 in motifList:
for m2 in motifList:
if m1.id != m2.id:
#check if the score is already calculated
if ("%s:%s" % (m1.id,m2.id)) not in ret and \
("%s:%s" % (m2.id,m1.id)) not in ret:
#must send in pssms as numpy arrays
dist = bmc.BLiC_score(numpy.array(m1.pssm, float),
numpy.array(m2.pssm, float))
ret["%s:%s" % (m1.id,m2.id)] = dist
return ret
|
1610d2213afc28a810077949c4a763e742f364ab
| 33,259 |
def step(name=None):
"""
Decorates functions that will register
a step.
"""
def decorator(func):
add_step(get_name(name, func), func)
return func
return decorator
|
64a69b5c0f31bef4e5126c869417a9215adc9221
| 33,260 |
def prepare_subimg(image5d, size, offset):
"""Extracts a subimage from a larger image.
Args:
image5d: Image array as a 5D array (t, z, y, x, c), or 4D if
no separate channel dimension exists as with most one channel
images.
size: Size of the region of interest as (z, y, x).
        offset: Tuple of offset given as (z, y, x) for the region
            of interest.
    Returns:
        The sub-image, without the separate time dimension, as a 3D array
        (or 4D array if a channel dimension exists).
"""
cube_slices = [slice(o, o + s) for o, s in zip(offset, size)]
libmag.printv("preparing sub-image at offset: {}, size: {}, slices: {}"
.format(offset, size, cube_slices))
# cube with corner at offset, side of cube_len
return image5d[0, cube_slices[0], cube_slices[1], cube_slices[2]]
|
c8c14333ed862fc3acec4347d0462e5dcb644ab8
| 33,261 |
def filter_plants_by_region_id(region_id, year, host='switch-db2.erg.berkeley.edu', area=0.5):
"""
Filters generation plant data by NERC Region, according to the provided id.
Generation plants w/o Region get assigned to the NERC Region with which more
than a certain percentage of its County area intersects (by default, 50%).
A list is saved with Counties and States belonging to the specified Region.
Both County and State are necessary to correctly assign plants (some County
names exist in multiple States).
Returns a DataFrame with the filtered data.
"""
state_dict = {
'Alabama':'AL',
'Alaska':'AK',
'Arizona':'AZ',
'Arkansas':'AR',
'California':'CA',
'Colorado':'CO',
'Connecticut':'CT',
'Delaware':'DE',
'Florida':'FL',
'Georgia':'GA',
'Hawaii':'HI',
'Idaho':'ID',
'Illinois':'IL',
'Indiana':'IN',
'Iowa':'IA',
'Kansas':'KS',
'Kentucky':'KY',
'Louisiana':'LA',
'Maine':'ME',
'Maryland':'MD',
'Massachusetts':'MA',
'Michigan':'MI',
'Minnesota':'MN',
'Mississippi':'MS',
'Missouri':'MO',
'Montana':'MT',
'Nebraska':'NE',
'Nevada':'NV',
'New Hampshire':'NH',
'New Jersey':'NJ',
'New Mexico':'NM',
'New York':'NY',
'North Carolina':'NC',
'North Dakota':'ND',
'Ohio':'OH',
'Oklahoma':'OK',
'Oregon':'OR',
'Pennsylvania':'PA',
'Rhode Island':'RI',
'South Carolina':'SC',
'South Dakota':'SD',
'Tennessee':'TN',
'Texas':'TX',
'Utah':'UT',
'Vermont':'VT',
'Virginia':'VA',
'Washington':'WA',
'West Virginia':'WV',
'Wisconsin':'WI',
'Wyoming':'WY'
}
#getting abbreviated name (regionabr) of NERC region from db (from switch_gis.public schema)
print "Getting NERC region name from database..."
query = "SELECT regionabr FROM ventyx_nerc_reg_region WHERE gid={}".format(
region_id)
region_name = connect_to_db_and_run_query(query=query,
database='switch_gis', host=host)['regionabr'][0]
#read in existing file with list of counties in each state in WECC or if file doesn't exist,
# assign county to state and WECC region if input % of area falls into region
counties_path = os.path.join('other_data', '{}_counties.tab'.format(region_name))
if not os.path.exists(counties_path):
# assign county if (area)% or more of its area falls in the region
query = "SELECT name, state\
FROM ventyx_nerc_reg_region regions CROSS JOIN us_counties cts\
JOIN (SELECT DISTINCT state, state_fips FROM us_states) sts \
ON (sts.state_fips=cts.statefp) \
WHERE regions.gid={region_id} AND\
ST_Area(ST_Intersection(cts.the_geom, regions.the_geom))/\
ST_Area(cts.the_geom)>={area}".format(PREFIX=PREFIX, region_id=region_id, area=area)
print "\nGetting counties and states for the region from database..."
region_counties = pd.DataFrame(connect_to_db_and_run_query(query=query,
database='switch_gis', host=host)).rename(columns={'name':'County','state':'State'})
region_counties.replace(state_dict, inplace=True)
region_counties.to_csv(counties_path, sep='\t', index=False)
else:
print "Reading counties from .tab file..."
region_counties = pd.read_csv(counties_path, sep='\t', index_col=None)
#reading in the processed generator project data from scrape.py from EIA 860 forms for each year
generators = pd.read_csv(
os.path.join('processed_data','generation_projects_{}.tab'.format(year)), sep='\t')
generators.loc[:,'County'] = generators['County'].map(lambda c: str(c).title())
print "\nRead in data for {} generators, of which:".format(len(generators))
print "--{} are existing".format(len(generators[generators['Operational Status']=='Operable']))
print "--{} are proposed".format(len(generators[generators['Operational Status']=='Proposed']))
#if generators don't have a NERC region already from the EIA data, assign region based on join on county and state
generators_with_assigned_region = generators.loc[generators['Nerc Region'] == region_name]
generators = generators[generators['Nerc Region'].isnull()]
generators_without_assigned_region = pd.merge(generators, region_counties, how='inner', on=['County','State'])
generators = pd.concat([
generators_with_assigned_region,
generators_without_assigned_region],
axis=0)
generators.replace(
to_replace={'Energy Source':coal_codes, 'Energy Source 2':coal_codes,
'Energy Source 3':coal_codes}, value='COAL', inplace=True)
generators_columns = list(generators.columns)
existing_gens = generators[generators['Operational Status']=='Operable']
proposed_gens = generators[generators['Operational Status']=='Proposed']
print "======="
print "Filtered to {} projects in the {} region, of which:".format(
len(generators), region_name)
print "--{} are existing with {:.0f} GW of capacity".format(
len(existing_gens), existing_gens['Nameplate Capacity (MW)'].sum()/1000.0)
print "--{} are proposed with {:.0f} GW of capacity".format(
len(proposed_gens), proposed_gens['Nameplate Capacity (MW)'].sum()/1000.0)
print "======="
return generators
|
e9eaa363a4ec2293b97a7a7ffacf82bc0ae49702
| 33,262 |
def _get_image_blob(roidb, scale_ind):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
# processed_ims_depth = []
# processed_ims_normal = []
im_scales = []
    for i in range(num_images):
# rgba
rgba = cv2.imread(roidb[i]['image'], cv2.IMREAD_UNCHANGED)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 0
else:
im = rgba
# chromatic transform
if cfg.TRAIN.CHROMATIC:
label = cv2.imread(roidb[i]['label'], cv2.IMREAD_UNCHANGED)
im = chromatic_transform(im, label)
if roidb[i]['flipped']:
im = im[:, ::-1, :]
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_scale = cfg.TRAIN.SCALES_BASE[scale_ind]
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
im_scales.append(im_scale)
processed_ims.append(im)
# # depth
# im_depth_raw = cv2.imread(roidb[i]['depth'], cv2.IMREAD_UNCHANGED)
# height = im_depth_raw.shape[0]
# width = im_depth_raw.shape[1]
# im_depth = im_depth_raw.astype(np.float32, copy=True) / float(im_depth_raw.max()) * 255
# im_depth = np.tile(im_depth[:,:,np.newaxis], (1,1,3)) # turn to (H,W,3), the last channel is the same
# if roidb[i]['flipped']:
# im_depth = im_depth[:, ::-1]
# im_orig = im_depth.astype(np.float32, copy=True)
# im_orig -= cfg.PIXEL_MEANS
# im_depth = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
# processed_ims_depth.append(im_depth)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims, 3)
# blob_depth = im_list_to_blob(processed_ims_depth, 3)
return blob, [], im_scales
|
69200c535818d159b10f80d9c967546cbbd33a75
| 33,263 |
def translate_month(month):
"""
Translates the month string into an integer value
Args:
month (unicode): month string parsed from the website listings.
Returns:
int: month index starting from 1
Examples:
>>> translate_month('jan')
1
"""
for key, values in months.items():
if month in values:
return key
|
5ff0e3506e37e4b9b5cdd2cd3bf5b322622172ab
| 33,264 |
def gencpppxd(desc, exception_type='+'):
"""Generates a cpp_*.pxd Cython header file for exposing C/C++ data from to
other Cython wrappers based off of a dictionary description.
Parameters
----------
desc : dict
        Class description dictionary.
exception_type : str, optional
Cython exception annotation. Set to None when exceptions should not
be included.
Returns
-------
cpppxd : str
Cython cpp_*.pxd header file as in-memory string.
"""
pars = ', '.join([cython_ctype(p) for p in desc['parents'] or ()])
d = {'parents': pars if 0 == len(pars) else '('+pars+')'}
copy_from_desc = ['name', 'namespace', 'header_filename']
for key in copy_from_desc:
d[key] = desc[key]
inc = set(['c'])
cimport_tups = set()
for parent in desc['parents'] or ():
cython_cimport_tuples(parent, cimport_tups, inc)
alines = []
attritems = sorted(desc['attrs'].items())
for aname, atype in attritems:
if aname.startswith('_'):
continue
alines.append("{0} {1}".format(cython_ctype(atype), aname))
cython_cimport_tuples(atype, cimport_tups, inc)
d['attrs_block'] = indent(alines, 8)
mlines = []
clines = []
estr = str() if exception_type is None else ' except {0}'.format(exception_type)
methitems = sorted(expand_default_args(desc['methods'].items()))
for mkey, mrtn in methitems:
mname, margs = mkey[0], mkey[1:]
if mname.startswith('_') or mname.startswith('~'):
continue # private or destructor
argfill = ", ".join([cython_ctype(a[1]) for a in margs])
for a in margs:
cython_cimport_tuples(a[1], cimport_tups, inc)
line = "{0}({1}){2}".format(mname, argfill, estr)
if mrtn is None:
# this must be a constructor
if line not in clines:
clines.append(line)
else:
# this is a normal method
rtype = cython_ctype(mrtn)
cython_cimport_tuples(mrtn, cimport_tups, inc)
line = rtype + " " + line
if line not in mlines:
mlines.append(line)
d['methods_block'] = indent(mlines, 8)
d['constructors_block'] = indent(clines, 8)
d['cimports'] = "\n".join(sorted(cython_cimports(cimport_tups)))
d['extra'] = desc.get('extra', {}).get('cpppxd', '')
cpppxd = _cpppxd_template.format(**d)
if 'cpppxd_filename' not in desc:
desc['cpppxd_filename'] = 'cpp_{0}.pxd'.format(d['name'].lower())
return cpppxd
|
a2e7cc486589ee301483b8a76b65313eb754221d
| 33,265 |
def no_results_to_show():
"""Produce an error message when there are no results to show."""
return format_html('<p class="expenses-empty">{}</p>', _("No results to show."))
|
492c1c19d4daf159a495c001bfc6671fdf6ed593
| 33,267 |
def enhance_shadows(Shw, method, **kwargs):
""" Given a specific method, employ shadow transform
Parameters
----------
Shw : np.array, size=(m,n), dtype={float,integer}
array with intensities of shading and shadowing
    method : {'mean', 'kuwahara', 'median', 'otsu', 'anistropic'}
method name to be implemented,can be one of the following:
* 'mean' : mean shift filter
* 'kuwahara' : kuwahara filter
* 'median' : iterative median filter
* 'otsu' : buffered Otsu filter
* 'anistropic' : anistropic diffusion filter
Returns
-------
M : np.array, size=(m,n), dtype={float,integer}
shadow enhanced image, done through the given method
See Also
--------
mean_shift_filter, kuwahara_filter, iterative_median_filter, otsu_filter,
anistropic_diffusion_scalar
"""
    if method in ('mean',):
        quantile = 0.1 if kwargs.get('quantile') is None else kwargs.get('quantile')
        M = mean_shift_filter(Shw, quantile=quantile)
    elif method in ('kuwahara',):
        tsize = 5 if kwargs.get('tsize') is None else kwargs.get('tsize')
        M = kuwahara_filter(Shw, tsize=tsize)
    elif method in ('median',):
        tsize = 5 if kwargs.get('tsize') is None else kwargs.get('tsize')
        loop = 50 if kwargs.get('loop') is None else kwargs.get('loop')
        M = iterative_median_filter(Shw, tsize=tsize, loop=loop)
    elif method in ('otsu',):
        tsize = 5 if kwargs.get('tsize') is None else kwargs.get('tsize')
        M = otsu_filter(Shw, tsize=tsize)
    elif method in ('anistropic',):
        n_iter = 10 if kwargs.get('iter') is None else kwargs.get('iter')
        K = .15 if kwargs.get('K') is None else kwargs.get('K')
        s = .25 if kwargs.get('s') is None else kwargs.get('s')
        n = 4 if kwargs.get('n') is None else kwargs.get('n')
        M = anistropic_diffusion_scalar(Shw, iter=n_iter, K=K, s=s, n=n)
    else:
        raise ValueError('please provide a correct method')
    return M
|
daddde00889be9eb6a1bb57fc140363d85ede32a
| 33,269 |
from typing import Optional
from typing import Set
import collections
def _to_real_set(
number_or_sequence: Optional[ScalarOrSequence]
) -> Set[chex.Scalar]:
"""Converts the optional number or sequence to a set."""
if number_or_sequence is None:
return set()
elif isinstance(number_or_sequence, (float, int)):
return {number_or_sequence} # pytype: disable=bad-return-type
elif (isinstance(number_or_sequence, collections.abc.Sequence) and
all(isinstance(x, (int, float)) for x in number_or_sequence)):
return set(number_or_sequence)
else:
raise ValueError(f"Expecting a real-number or a sequence of reals, but got "
f"{type(number_or_sequence)}.")
|
c0f380a9a179634da04447198ad6553c3c684f7c
| 33,270 |
def alt_or_ref(record, samples: list):
"""
takes in a single record in a vcf file and returns the sample names divided into two lists:
ones that have the reference snp state and ones that have the alternative snp state
Parameters
----------
record
the record supplied by the vcf reader
samples: list
list of sample names
Returns
-------
ref_group, alt_group : list
lists of samples divided by ref or alt snp state
"""
tracker = 0
ref_group = []
alt_group = []
for call in record.calls:
state = int(call.data.get('GT'))
sample_name = samples[tracker]
if state == 0:
ref_group.append(sample_name)
elif state == 1:
alt_group.append(sample_name)
else:
print("there is a problem reading the state information")
raise SystemExit(0)
tracker += 1
return ref_group, alt_group
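# Usage sketch with a minimal stand-in for the vcf record object (the stub
# classes below are hypothetical; real records come from the vcf reader).
class _Call:
    def __init__(self, gt):
        self.data = {'GT': gt}
class _Record:
    def __init__(self, gts):
        self.calls = [_Call(gt) for gt in gts]
ref, alt = alt_or_ref(_Record(['0', '1', '0']), ['sampleA', 'sampleB', 'sampleC'])
# ref == ['sampleA', 'sampleC'], alt == ['sampleB']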
|
abaccfeef02ee625d103da88b23fce82a40bc04c
| 33,271 |
def plot_loss(ctx, tests, rulers=[], sfx="", **kwargs):
"""Loss plot (1 row per test, val on left, train on right)."""
vh = len(tests)
fig, axs = plt.subplots(vh, 2, figsize=(16, 4 * vh))
for base, row in zip(tests, axs.reshape(vh, 2)):
ctx.plot_loss(
base, row[0], baselines=["adam"], validation=True, **kwargs)
ctx.plot_loss(
base, row[1], baselines=["adam"], validation=False, **kwargs)
for r in rulers:
row[0].axline(r, color='black')
row[0].set_title(ctx.get_name(base))
row[1].set_title(ctx.get_name(base))
fig.tight_layout()
return fig, axs
|
d215ec0bc20520380bf0625f27abbad383981cd3
| 33,272 |
def mpi_rank():
"""
Returns the rank of the calling process.
"""
comm = mpi4py.MPI.COMM_WORLD
rank = comm.Get_rank()
return rank
|
63fba63118edbced080b5077931c0d63d2c672b2
| 33,273 |
def db(app):
"""
    Set up our database; this only gets executed once per session.
:param app: Pytest fixture
:return: SQLAlchemy database session
"""
_db.drop_all()
_db.create_all()
# Create a single user because a lot of tests do not mutate this user.
# It will result in faster tests.
params = {
'role': 'admin',
'email': '[email protected]',
'password': 'password'
}
admin = User(**params)
_db.session.add(admin)
_db.session.commit()
return _db
|
0176de576b1f217ce56e61cdba5b2deb287d7430
| 33,274 |
def is_const_component(record_component):
"""Determines whether a group or dataset in the HDF5 file is constant.
Parameters
----------
record_component : h5py.Group or h5py.Dataset
Returns
-------
bool
True if constant, False otherwise
References
----------
.. https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md,
section 'Constant Record Components'
"""
return "value" in record_component.attrs.keys()
|
4adb2ff7f6fb04086b70186a32a4589ae9161bb5
| 33,275 |
def centernet_resnet101b_voc(pretrained_backbone=False, classes=20, **kwargs):
"""
CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 20
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
backbone = resnet101b(pretrained=pretrained_backbone).features
del backbone.final_pool
return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes,
model_name="centernet_resnet101b_voc", **kwargs)
|
39b6ed4aa1d5c1143ef3b35f12f6be71160ec5cf
| 33,276 |
import pytz
def isodate(dt):
"""Formats a datetime to ISO format."""
tz = pytz.timezone('Europe/Zagreb')
return dt.astimezone(tz).isoformat()
|
d07118e188772ec6a87d554c6883530164eeb550
| 33,278 |
def _ncells_after_subdiv(ms_inf, divisor):
"""Calculates total number of vtu cells in partition after subdivision
    :param ms_inf: Mesh/soln information. ('ele_type', [npts, nele, ndims])
:type ms_inf: tuple: (str, list)
:rtype: integer
"""
# Catch all for cases where cell subdivision is not performed
if divisor is None:
divisor = 1
# Calculate the number of low-order cells in each high order element
n_sub_ele = divisor ** ms_inf[1][2]
# Pyramids require the further addition of an arithmetic series
if ms_inf[0] == 'pyr':
        n_sub_ele += (divisor - 1) * divisor // 2
# Multiply by number of elements
return n_sub_ele * ms_inf[1][1]
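# Worked example (assumes _ncells_after_subdiv as defined above; npts values
# are illustrative): dividing a 3-D mesh of 10 elements by 2 gives 2**3 = 8
# sub-cells per hexahedron, while pyramids get the extra arithmetic-series term.
hex_cells = _ncells_after_subdiv(('hex', [27, 10, 3]), 2)   # 8 * 10 = 80
pyr_cells = _ncells_after_subdiv(('pyr', [14, 10, 3]), 2)   # (8 + 1) * 10 = 90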
|
981db31a7729c0cac88575b1cb12505a30cf0abb
| 33,279 |
def match_santa_pairs(participants: list):
""" This function returns a list of tuples of (Santa, Target) pairings """
shuffle(participants)
return list(make_circular_pairs(participants))
|
dc002f82d25df89ee70392ff48fdd401a960ccc5
| 33,280 |
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
|
52ee4941c7cf48179652a0f0e26a4d271579047f
| 33,281 |
def top_rank(df, target, n=None, ascending=False, method='spearman'):
"""
    Calculate the top (or bottom, with ascending=True) N features by absolute correlation with the target.
    This measures single-feature relevance, which works well for independent features
    but suffers in the presence of codependent features.
    pearson : standard correlation coefficient
    kendall : Kendall Tau correlation coefficient
    spearman : Spearman rank correlation
    :return: (feature names, absolute correlation / importance values), sorted best-first
"""
if not n:
n = len(df.columns)
if method == 'PCA':
scaler = StandardScaler()
feas = [col for col in df.columns if col != target]
X = scaler.fit_transform(df.loc[:, feas])
pca = PCA(n_components=0.9)
pca.fit(X)
featimp = {feas[i]:abs(pca.components_[0])[i] for i in range(len(feas))}
feas = sorted(featimp, key=featimp.get, reverse=True)[:n]
vals = [featimp[fea] for fea in feas]
else:
feas = list(abs(df.corr(method=method)[target]).sort_values(ascending=ascending).index[1:n+1])
vals = list(abs(df.corr(method=method)[target]).sort_values(ascending=ascending))[1:n+1]
return feas, vals
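# Usage sketch (assumes top_rank and pandas as pd; sklearn is only needed for
# the 'PCA' branch). Rank two toy features by absolute Spearman correlation
# with the target column 'y'.
df = pd.DataFrame({'y':  [1, 2, 3, 4, 5],
                   'x1': [1, 2, 4, 3, 5],    # strongly monotone with y
                   'x2': [5, 3, 4, 1, 2]})   # weaker, negative relation
feas, vals = top_rank(df, 'y', n=2, method='spearman')
# feas -> ['x1', 'x2'], vals -> approximately [0.9, 0.8]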
|
f8e2b0b9888af00c6c75acac4f5063580bbada07
| 33,282 |
def sample_points_on_sphere(center, distance_from_center, hemisphere=False):
"""just use the polar coordinates to do this, this can be sped up do that
"""
EPS = 1e-6
thetas = np.linspace(0+EPS, 2*np.pi, 64)
phis = np.linspace(0+EPS, np.pi/2, 64) if hemisphere else np.linspace(0+EPS, np.pi, 64)
points = list()
for phi in phis:
x = distance_from_center * np.cos(thetas) * np.sin(phi)
y = distance_from_center * np.sin(thetas) * np.sin(phi)
z = np.repeat(distance_from_center * np.cos(phi), len(x))
points.append(np.c_[x, y, z])
# stack 'em up
points = np.stack(points).reshape(-1, 3)
points[:, 0] += center[0]
points[:, 1] += center[1]
points[:, 2] += center[2]
return points
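# Usage sketch (assumes sample_points_on_sphere and numpy as np): every
# sampled point should sit at the requested distance from the center.
pts = sample_points_on_sphere((1.0, 2.0, 3.0), 0.5, hemisphere=True)
radii = np.linalg.norm(pts - np.array([1.0, 2.0, 3.0]), axis=1)
# radii is approximately 0.5 everywhere; pts has shape (64 * 64, 3)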
|
0441a58da5e9bd6cccd8aeae29c022ffd6e6eae8
| 33,283 |
from pathlib import Path
def get_package_path() -> Path:
"""
Get local install path of the package.
"""
return to_path(__file__).parent.absolute()
|
7976606ad2731b408dd6a44d72e27f6307c2fa8b
| 33,284 |
import inspect
import types
def parameterized_class(cls):
"""A class decorator for running parameterized test cases.
Mark your class with @parameterized_class.
Mark your test cases with @parameterized.
"""
    test_functions = inspect.getmembers(cls, predicate=inspect.isfunction)
for (name, f) in test_functions:
if name.startswith('test_') and not hasattr(f, '_test_data'):
continue
# remove the original test function from the class
delattr(cls, name)
# add a new test function to the class for each entry in f._test_data
for tag, args in f._test_data.items():
new_name = "{0}_{1}".format(f.__name__, tag)
if hasattr(cls, new_name):
raise Exception(
"Parameterized test case '{0}.{1}' created from '{0}.{2}' "
"already exists".format(cls.__name__, new_name, name))
# Using `def new_method(self): f(self, **args)` is not sufficient
# (all new_methods use the same args value due to late binding).
# Instead, use this factory function.
new_method = def_method(f, **args)
            # Simply attach the generated function to the class; accessing it
            # through an instance binds it automatically (no types.MethodType needed).
            setattr(cls, new_name, new_method)
return cls
|
084ec02b2c9427ffb9ddd41ef9857390477d9d6f
| 33,285 |
def isStringLike(s):
""" Returns True if s acts "like" a string, i.e. is str or unicode.
Args:
s (string): instance to inspect
Returns:
True if s acts like a string
"""
try:
s + ''
    except TypeError:
return False
else:
return True
|
73fc002843735536c159eed91cf54886f52e78e7
| 33,286 |
def velocity_to_wavelength(velocities, input_units, center_wavelength=None,
center_wavelength_units=None, wavelength_units='meters',
convention='optical'):
"""
Conventions defined here:
http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html
* Radio V = c (c/l0 - c/l)/(c/l0) f(V) = (c/l0) ( 1 - V/c )
* Optical V = c ((c/l0) - (c/l))/(c/l) f(V) = (c/l0) ( 1 + V/c )^-1
* Redshift z = ((c/l0) - (c/l))/(c/l) f(V) = (c/l0) ( 1 + z )-1
* Relativistic V = c ((c/l0)^2 - (c/l)^2)/((c/l0)^2 + (c/l)^2) f(V) = (c/l0) { 1 - (V/c)2}1/2/(1+V/c)
"""
if input_units in wavelength_dict:
print "Already in wavelength units (%s)" % input_units
return velocities
if center_wavelength is None:
raise ValueError("Cannot convert velocity to wavelength without specifying a central wavelength.")
if wavelength_units not in wavelength_dict:
raise ValueError("Bad wavelength units: %s" % (wavelength_units))
velocity_ms = velocities / velocity_dict['m/s'] * velocity_dict[input_units]
center_frequency = speedoflight_ms / center_wavelength
if convention == 'radio':
wav = (velocity_ms / speedoflight_ms - 1.0) * center_frequency * -1
elif convention == 'optical':
wav = (velocity_ms / speedoflight_ms + 1.0)**-1 * center_frequency
elif convention == 'relativistic':
wav = (-(velocity_ms / speedoflight_ms)**2 + 1.0)**0.5 / (1.0 + velocity_ms/speedoflight_ms) * center_frequency
else:
raise ValueError('Convention "%s" is not allowed.' % (convention))
wavelengths = wav / wavelength_dict[wavelength_units] * wavelength_dict[center_wavelength_units]
return wavelengths
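# A standalone numerical check of the optical convention quoted in the
# docstring, independent of the module-level unit dictionaries; the wavelength
# and velocity values are illustrative.
c = 299792458.0                   # speed of light in m/s
l0 = 656.3e-9                     # rest wavelength in meters
v = 3.0e5                         # velocity in m/s
f = (c / l0) / (1.0 + v / c)      # optical convention: f(V) = (c/l0) (1 + V/c)^-1
v_back = c * ((c / l0) - f) / f   # invert: V = c ((c/l0) - f) / f
# v_back recovers the input velocity (~3.0e5 m/s)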
|
aff040967297848253e49272eb6b5ba4e687433b
| 33,287 |