content stringlengths 35-762k | sha1 stringlengths 40-40 | id int64 0-3.66M |
---|---|---|
import os
import pandas as pd

def get_ground_weather_one_place(dir_path):
    """ Get ground weather data for a single location.
    Args:
        dir_path(string) : directory path
    Returns:
        DataFrame : result of reading the files
    """
    # Get the list of ground weather data files
    file_paths = read_csv.get_file_paths(dir_path)
    # Read the weather data and store it in a DataFrame
    ground_df = None
    for file_path in file_paths:
        # Read ground weather data
        df = read_csv.read_ground(file_path)
        # Extract the specified rows
        df1 = wdfproc.extract_row_isin(df, ('時', '時'), [9, 21])
        # Append to the accumulated DataFrame
        if ground_df is None:
            ground_df = df1
        else:
            ground_df = pd.concat([ground_df, df1])
    # Get the location name
    dirname = os.path.basename(dir_path)
    elements = name_handle.elements_from_dirname_ground(dirname)
    place_name = elements['name']
    # Rename the columns
    ground_df = wdfproc.rename_column_ground(ground_df, place_name)
return ground_df | b25c6008c1f1cf9760c16dc10fd44dee8e62ad55 | 5,200 |
import pandas as pd

def trash_description(spl, garbage, keyword, description="description_1"):
"""description_1 OR description_2"""
relocate = spl[spl[description].str.contains(keyword, na=False, regex=True)]
spl = spl[~spl[description].str.contains(keyword, na=False, regex=True)]
garbage = pd.concat([garbage, relocate], ignore_index=True, sort=False)
return (spl, garbage, relocate) | 16a1512ddaf914bd5ebcd00f2dcdfa11d59ec73c | 5,201 |
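A minimal usage sketch (assumes pandas; the column values and keyword regex are made up):

import pandas as pd

spl = pd.DataFrame({"description_1": ["old sofa", "tv stand", "broken chair"]})
garbage = pd.DataFrame(columns=["description_1"])
# Rows whose description matches the (hypothetical) regex are moved into `garbage`.
spl, garbage, relocate = trash_description(spl, garbage, keyword="sofa|chair")
print(list(spl["description_1"]))      # ['tv stand']
print(list(garbage["description_1"]))  # ['old sofa', 'broken chair']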
import random
def prepositionalPhrase():
"""Builds and returns a prepositional phrase."""
return random.choice(prepositions) + " " + nounPhrase() | 33a6f1111f752c160ef90eedde4bf56b79b1100a | 5,202 |
import pandas as pd

def check_possible_dtype(df):
    """Guess dtypes for each column in a dataframe, where the dataframe must contain only string values.
    Raise an exception if the dataframe contains non-string values.
:param df: a DataFrame whose all values must be strings.
"""
column = []
int_cnt = []
dec_cnt = []
str_cnt = []
d = {"column": column, "int_cnt": int_cnt, "dec_cnt": dec_cnt, "str_cnt": str_cnt}
for i in df.columns:
ser = df[i].drop_duplicates()
column.append(i)
int_cnt.append(ser.apply(lambda x: is_int_str(x)).sum())
dec_cnt.append(ser.apply(lambda x: is_dec_str(x)).sum())
str_cnt.append(ser.apply(lambda x: not is_number_str(x)).sum())
dtype_options_df = pd.DataFrame(d, columns=["column", "int_cnt", "dec_cnt", "str_cnt"])
# Best-effort guess on dtype
guessed_dtype = dtype_options_df.apply(guess_dtype, axis=1).rename("guessed_type_for_non_nan")
return pd.concat([dtype_options_df, guessed_dtype], axis=1) | 0e9759959af04fbf1bb9db3672f6a188afe7f6ab | 5,203 |
from typing import List

import numpy as np

def filter_objects_avoiding_duplicated(objects: List[Object],
                                       max_distance: int = 20) -> List[Object]:
    """Filter the objects, discarding likely duplicate detections.
    The idea behind the algorithm is that if two objects are detected with very close centroids,
    within the maximum distance given by ``max_distance``, then it is a duplicate detection.
    The conflict is resolved by removing the duplicate detections and keeping the one with the
    best detection score.
    :param objects: list of objects.
    :param max_distance: maximum distance between centers for an object to be considered a
    possible duplicate.
    :return: list of filtered objects.
    """
    # List of the positions in 'objects' of the removed objects.
    removed_objects_id = list()
    # Find the possible candidates for each object.
    for obj_id, obj_detection in enumerate(objects):
        for candidate_id, candidate_detection in enumerate(objects):
            # Skip comparing the object against itself.
            if obj_id == candidate_id:
                continue
            # Skip if either of the objects being compared has already been removed.
            if obj_id in removed_objects_id or candidate_id in removed_objects_id:
                continue
            # Compute the Euclidean distance between both detections.
            p = np.array(obj_detection.center)
            q = np.array(candidate_detection.center)
            distance = np.linalg.norm(p - q)
            # If the distance is small, it may be the same object.
            if distance <= max_distance:
                # Remove the one with the lower score.
                if obj_detection.score > candidate_detection.score:
                    removed_objects_id.append(candidate_id)
                else:
                    removed_objects_id.append(obj_id)
    # List of the objects that passed the filter.
    objects_filtered: List[Object] = list()
for obj_id, obj_detection in enumerate(objects):
if obj_id not in removed_objects_id:
objects_filtered.append(obj_detection)
return objects_filtered | 042fee5df94dc1c72fb53635577c8006c57f73f9 | 5,204 |
import os
def splitall(path):
"""
Credit goes to Trent Mick
SOURCE:
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s16.html
"""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts | 3d25bdfab5fd74d59e67100c864c950a2aaaa78b | 5,205 |
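A quick illustrative check of the behaviour on POSIX-style paths (assumes the splitall above is in scope):

assert splitall("usr/local/bin/python") == ["usr", "local", "bin", "python"]
assert splitall("/usr/local") == ["/", "usr", "local"]  # absolute paths keep the leading "/"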
import os
def netstat():
"""
Return list of all connections.
    Return list of TCP listening connections and UDP connections.
All localhost connections are filtered out.
This script must run as root in order to be able to obtain PID values
of all processes. For more information see:
https://unix.stackexchange.com/questions/226276/read-proc-to-know-if-a-process-has-opened-a-port
:return: List of connections
"""
uid = os.getuid()
assert uid == 0, "This script must run as root"
tcp4_list = netstat_tcp4()
udp4_list = netstat_udp4()
tcp6_list = netstat_tcp6()
udp6_list = netstat_udp6()
raw_list = netstat_raw()
return tcp4_list + udp4_list + tcp6_list + udp6_list + raw_list | 04884bd091438956012c1524671a1ffdfddc4c6f | 5,206 |
def print_hdr(soup, hdr, file = None):
"""
:param soup: [bs4.BeautifulSoup] document context
:param hdr: [dict] header node to process
:param file: [stream] I/O stream to print to
:return: [stream] pass on the I/O stream so descent continues
"""
tag = hdr['tag']
tag_id = tag['id']
indent = (hdr['level'] - 1) * ' '
# do this replacement for (relative) readability
content_tags = ["<%s>" % (h.name) if h.name else h.string for h in hdr['content']]
print("%s%s - %s %s" % (indent, tag.name, tag_id, content_tags), file=file)
return file | 2c6fd613a5c6ddb5ec842fb7cee845d1a8771ccd | 5,207 |
from unittest.mock import Mock
from mathutils import Matrix  # assumed: Blender's mathutils provides Matrix.Identity

def __empty_2():
""" Empty used as parent of cube_2 """
obj = Mock()
obj.name = 'empty_2'
obj.mode = 'OBJECT'
obj.to_mesh.return_value = None
obj.matrix_world = Matrix.Identity(4)
obj.visible_get.return_value = False
obj.hide_viewport = True
obj.hide_render = True
return obj | 024614d7967da5da6d6629167a20eda4188e812f | 5,208 |
import argparse
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="A scaffolding program for developer notes")
parser.add_argument(
"-v",
"--version",
action="version",
version="nota {ver}".format(ver=__version__))
parser.add_argument(
dest="name",
help="name of new note",
metavar="<name>")
parser.add_argument(
"-c",
"--config",
dest="config",
help="configuration file location")
parser.add_argument(
"-t",
"--template",
dest="template",
help="custom template file location")
parser.add_argument(
"-i",
"--identifier",
dest="identifier",
help="custom note identifier")
parser.add_argument(
"--directories",
dest="dirs",
help="additional directories to create",
action="append",
nargs="+")
parser.add_argument(
"--filename",
dest="filename",
help="custom note filename")
parser.add_argument(
"-l",
"--list",
dest="list",
help="lists all notes available",
action="store_true")
parser.add_argument(
"-r",
"--root",
dest="root",
help="root directory for all notes")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-d",
"--defect",
dest="note_type",
help="create a defect note",
action="store_const",
const=NoteType.Defect)
group.add_argument(
"-b",
"--bug",
dest="note_type",
help="create a bug note",
action="store_const",
const=NoteType.Bug)
group.add_argument(
"-s",
"--story",
dest="note_type",
help="create a story note",
action="store_const",
const=NoteType.Story)
group.add_argument(
"-f",
"--feature",
dest="note_type",
help="create a feature note",
action="store_const",
const=NoteType.Feature)
group.add_argument(
"-o",
"--option",
dest="custom",
help="create a custom note")
return parser.parse_args(args) | 8217e73fe219e18a8a7c8d0560fba95c5c3458df | 5,209 |
import numpy as np

def get_gradient(bf_data: np.ndarray, smooth=10):
"""
Removes first dimension,
Computes gradient of the image,
applies gaussian filter
Returns SegmentedImage object
"""
data = strip_dimensions(bf_data)
gradient = get_2d_gradient(data)
smoothed_gradient = gaussian_filter(gradient, smooth)
# sm = multiwell.gaussian_filter(well, smooth)
return smoothed_gradient.reshape(bf_data.shape) | 864b3bc118d08099c56657b2f2883e20de5c663e | 5,210 |
def sum_seq(seq):
""" Lambda wrapper for sum. """
return K.sum(seq, axis=1, keepdims=False) | e2bf342f6cda9bda50dc15814c7808a42e8a9925 | 5,211 |
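A hypothetical illustration, assuming the K above is the TensorFlow Keras backend:

import tensorflow as tf
from tensorflow.keras import backend as K  # assumed to be the K used above

x = tf.ones((2, 5, 3))    # (batch, timesteps, features)
print(sum_seq(x).shape)   # (2, 3) -- the time axis is summed away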
def split_by_time(files_rad):
"""Separate a list of files by their timestamp"""
out = {}
if type(files_rad) == dict:
for k in files_rad.keys():
out[k] = _split_by_time(files_rad[k])
else:
out = _split_by_time(files_rad)
return out | 9a77b3db2e21c27198337b1a1852494bca5acefb | 5,212 |
def make_general_csv_rows(general_csv_dict):
"""
Method for make list of metrics from general metrics dict.
Rows using in general metrics writer
:param general_csv_dict: dict with all metrics
:type general_csv_dict: dict
:return: all metrics as rows
:rtype: list
"""
rows = []
for key, value in general_csv_dict.items():
row = [key[0], key[1]]
row.extend(value)
rows.append(row)
return rows | 45ca165d312b39cd0b7088e0bcbfb402a92e7e2b | 5,213 |
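A small illustration with a made-up metrics dict whose keys are 2-tuples:

general_csv_dict = {("model_a", "run1"): [0.91, 0.88], ("model_b", "run1"): [0.87, 0.90]}
print(make_general_csv_rows(general_csv_dict))
# [['model_a', 'run1', 0.91, 0.88], ['model_b', 'run1', 0.87, 0.9]]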
import numpy as np

def build_hstwcs(crval1, crval2, crpix1, crpix2, naxis1, naxis2, pscale, orientat):
""" Create an HSTWCS object for a default instrument without distortion
based on user provided parameter values.
"""
wcsout = wcsutil.HSTWCS()
wcsout.wcs.crval = np.array([crval1,crval2])
wcsout.wcs.crpix = np.array([crpix1,crpix2])
wcsout.naxis1 = naxis1
wcsout.naxis2 = naxis2
wcsout.wcs.cd = fileutil.buildRotMatrix(orientat)*[-1,1]*pscale/3600.0
# Synchronize updates with PyWCS/WCSLIB objects
wcsout.wcs.set()
wcsout.setPscale()
wcsout.setOrient()
wcsout.wcs.ctype = ['RA---TAN','DEC--TAN']
return wcsout | 0247a8dc7e6aa083db50f21d82676216583be206 | 5,214 |
from sklearn.ensemble import RandomForestRegressor

def build_regressor_for_ranking_positive_class(dataset, features, regression_target=TARGET_COLUMN):
"""This function builds a regressor based exclusively on positive class'
examples present in the dataset
"""
if regression_target in features:
print('The target for the regression task cannot be one of the features')
return
positive_examples = dataset.loc[dataset[TARGET_COLUMN] > ALPHA]
X = positive_examples[features]
y = positive_examples[regression_target]
regressor = RandomForestRegressor(random_state=20)
regressor.fit(X, y)
return regressor | 1312751425f79c1e4fec09f705f0ea551e2a60b3 | 5,215 |
def get_speakable_timestamp(timestamp):
"""Return a 'speakable' timestamp, e.g. 8am, noon, 9pm, etc."""
speakable = f"{timestamp.strftime('%I').lstrip('0')} {timestamp.strftime('%p')}"
if speakable == '12 PM':
return 'noon'
elif speakable == '12 AM':
return 'midnight'
return speakable | 0b724686ebd5d3152d9017dc456d2945c78be0ee | 5,216 |
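For example (the %p output is locale dependent; an English locale is assumed):

from datetime import datetime

print(get_speakable_timestamp(datetime(2021, 6, 1, 8, 0)))   # '8 AM'
print(get_speakable_timestamp(datetime(2021, 6, 1, 12, 0)))  # 'noon'
print(get_speakable_timestamp(datetime(2021, 6, 1, 0, 0)))   # 'midnight'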
def createColor(red: int, green: int, blue: int) -> tuple:
"""
Create color
Parameters:
red -> 0-255
green -> 0-255
blue -> 0-255
"""
    # Clamp each channel to the valid 0-255 range and return them as a tuple.
    return (
        max(min(red, 255), 0),
        max(min(green, 255), 0),
        max(min(blue, 255), 0)
) | 3e8ee43e9d458668f4312f9fd75050b5875036d7 | 5,217 |
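For example, out-of-range channels are clamped to the 0-255 range:

print(createColor(300, -5, 128))  # (255, 0, 128)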
from typing import List
def export_nodeclass_list(node_classes: List[NodeClass]) -> str:
"""Writes the Node data as a XML string. Does not write
to a file -- use ``with open(output_file) as out_stream:`` etc.
"""
# This is the data string, the rest is formalities
node_classes_string = '\n'.join([str(c) for c in node_classes])
lines = list()
lines.append('<?xml version="1.0" encoding="utf-8"?>')
lines.append('<NodeClasses noNamespaceSchema="mff-muscima-mlclasses.xsd">')
lines.append(node_classes_string)
lines.append('</NodeClasses>')
return '\n'.join(lines) | f50638e9b3a7ab2f1df6e49703b9ed3e39916f9d | 5,218 |
import time
def recognition(request):
"""
style transform service
"""
if request.method == 'POST':
        name = ''
        predictions = ''
try:
# load image
now = time.localtime()
img = request.FILES['image']
image_name = '{}{}{}{}{}object.jpg'.format(now[1], now[2], now[3], now[4], now[5])
# get prediction
            predictions = predict_app(img)
# save to database
Image = ContentImage()
Image.name = 'static/images/predict/' + image_name
Image.save()
# save to disk
addr = BASE_DIR + 'predict/' + image_name
save_to_disk(addr, img)
image_url = 'images/predict/' + image_name
except Exception as e:
print(e)
return render(request, 'recognition/basic.html', {})
        return render(request, 'recognition/basic.html', {'image_url': image_url, 'predictions': predictions})
if request.method == 'GET':
return render(request, 'recognition/basic.html', {}) | d8de5ab5c33e6ca0c2ac5afbec81c402f7151187 | 5,219 |
from urllib.parse import urlparse

def url(s):
    """Validate url input"""
u = urlparse(s)
if u.scheme not in ["http", "https"]:
raise ValueError(s)
return u.geturl() | 82683af4ad6fb35b6d74409a9a429c4dfd81a723 | 5,220 |
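Illustrative use of the validator (the URLs are placeholders):

print(url("https://example.com/a?b=1"))   # 'https://example.com/a?b=1'
try:
    url("ftp://example.com/file")
except ValueError as exc:
    print("rejected:", exc)               # only http/https schemes pass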
import pickle

import jax
import jax.numpy as jnp
import jaxopt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tinygp

def getGPLCs(df, savepath='./', plotpath='./', bands='ugrizY', ts='0000000', fn='GPSet'):
    """Fit per-band Gaussian-process models to each light curve in ``df`` and save
    the interpolated light curves (and diagnostic plots) to disk.
    Parameters
    ----------
    df : pandas.DataFrame
        Light-curve table with "T", "Flux", "Flux_Err", "Filter", and "CID" columns.
    savepath : str
        Directory in which to save the pickled GP light curves.
    plotpath : str
        Directory in which to save diagnostic plots.
    bands : str
        Photometric bands to model.
    ts : str
        Timestamp string used in the output filename.
    fn : str
        Output filename prefix.
    Returns
    -------
    dict
        Maps each CID to a stacked array of the time grid and the per-band GP means
        and standard deviations.
    """
#num_bands = len(np.unique(band_idx))
Npt = 100
tmin = -30
tmax = 150
num_bands = len(bands)
GP_dict = {}
# make our plots look nice
stylePlots()
for idx, row in df.iterrows():
t = np.array(row["T"])
f = np.array(row["Flux"])
f[f<0.] = 0. #getting rid of negative flux
#the magnitude-like array for the sake of the conversion
y = np.log(f + 1)
yerr = np.array(row["Flux_Err"]) / np.array(row["Flux"])
t_test = np.linspace(tmin, tmax, Npt)
band = row["Filter"]
band_idx = pd.Series(row['Filter']).astype('category').cat.codes.values
matrix = [t_test]
def build_gp(params):
time_kernel = tinygp.kernels.Matern32(jnp.exp(params["log_scale"]))
kernel = Multiband(time_kernel, jnp.exp(params["log_diagonal"]), params["off_diagonal"])
diag = yerr ** 2 + jnp.exp(2 * params["log_jitter"][X[1]])
return tinygp.GaussianProcess(kernel, X, diag=diag, mean=lambda x: params["mean"][x[1]])
#the GP parameters
@jax.jit
def loss(params):
return -build_gp(params).condition(y)
X = (t, band_idx)
solver = jaxopt.ScipyMinimize(fun=loss)
soln = solver.run(params)
gp = build_gp(soln.params)
df_t = []
df_flux = []
df_flux_err = []
df_filt = []
if idx%50 == 0:
plt.figure(figsize=(10,7))
for n in np.unique(band_idx):
m = band_idx == n
plt.errorbar(t[m], np.exp(y[m])-1,yerr=row['Flux_Err'][m], fmt="o", color=f"C{n}")
mu, var = gp.predict(y, X_test=(t_test, np.full_like(t_test, n, dtype=int)), return_var=True)
std = np.sqrt(var)
if idx%50 == 0:
plt.plot(t_test, np.exp(mu)-1, '.-', ms=2, color=f"C{n}")
plt.fill_between(t_test,np.exp(mu - std)-1, np.exp(mu + std)+1, color=f"C{n}", alpha=0.3, label=bands[n])
#going in order of band here--don't forget it!
matrix.append(np.exp(mu)-1)
matrix.append(std)
if idx%50 == 0:
plt.xlim((t_test[0], t_test[-1]))
plt.xlabel("Phase from Trigger (Days)")
plt.ylabel("Flux")
plt.legend()
plt.savefig(plotpath + "/GP_%i.png"%row.CID,dpi=200, bbox_inches='tight')
stacked = np.vstack(matrix)
GP_dict[row.CID] = stacked
with open(savepath + '/%s_%i.pkl'%(fn, ts), 'wb') as f:
pickle.dump(GP_dict, f)
return GP_dict | 755dec48771ae17c058565ef88087d6ec6a78aec | 5,221 |
import torch
def _featurize(inputs,model):
"""
Helper function used to featurize exemplars before feeding into
buffer.
"""
with torch.no_grad():
# Forward pass
outputs = model(*inputs).detach() #Featurize raw exem
return outputs | 191fd1b362f38309a35618284fcf3f1910a06bd6 | 5,222 |
import cv2
import numpy as np

def ligth_condition(img, args):
    """
    Change the lighting condition of the image
    Inputs:
        img: Image whose lighting will be changed
        args: Dictionary with a "gamma" argument
    Return:
        Image with lighting values changed
    """
invGamma = 1.0 / args["gamma"]
table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(img, table) | dc5273a1df8e13292147b00be45452a7ccf4a197 | 5,223 |
import numpy as np
from sklearn.metrics import mean_squared_error
def calc_RMSE(varx,vary,lats,lons,weight):
"""
    Calculates the (optionally latitude-weighted) root-mean-square error
    Parameters
    ----------
    varx : 2d array
    vary : 2d array
    lats : 1d array of latitudes
    lons : 1d array of longitudes
    weight : string (yes or no)
Returns
-------
rmse : 1d array
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
    if weight == 'yes': # Compute weighted RMSE
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 30)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
    elif weight == 'no':
        ### Root mean square error from sklearn (not weighted)
        rmse = np.sqrt(mean_squared_error(varx.ravel(), vary.ravel()))
        print('Completed: Computed NON-weighted RMSE!')
    else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse | 150d08e0790f3a8ce59a2054cdc042ff6cdc2969 | 5,224 |
def sample(internal_nodes, alpha=0.5, beta=0.5, only_tree=False):
    """ Generates a junction tree over the given internal nodes with the junction tree expander.
    Args:
        internal_nodes (int or list): number of nodes in the underlying graph, or the nodes themselves
        alpha (float): parameter for the subtree kernel
        beta (float): parameter for the subtree kernel
        only_tree (bool): if True, the expander returns only the tree
    Returns:
        NetworkX graph: a junction tree
    """
nodes = None
if type(internal_nodes) is int:
nodes = range(internal_nodes)
else:
nodes = internal_nodes
tree = JunctionTree()
#from trilearn.graph.junction_tree_gt import JunctionTreeGT
#tree = JunctionTreeGT()
tree.add_node(frozenset([nodes[0]]))
# print tree.nodes()
# for n in tree.nodes():
# lab = tuple(n)
# if len(n) == 1:
# lab = "("+str(list(n)[0])+")"
# tree.node[n] = {"color": "black", "label": lab}
for j in nodes[1:]:
if only_tree:
jte.sample(tree, j, alpha, beta, only_tree=only_tree)
else:
(tree, _, _, _, _, _) = jte.sample(tree, j, alpha, beta, only_tree=only_tree)
#print("vert dict: " + str(tree.gp.vert_dict))
#print("nodes: " + str(list(tree.vp.nodes)))
return tree | d0cc00e7ad96491147149aa4be396af970a9f68f | 5,225 |
def _get_version_tuple():
"""
version as a tuple
"""
return major, minor, revision | 1d82390224de07964dce7c4e7fd3e32595b189a0 | 5,226 |
def _fit_seasonal_model_with_gibbs_sampling(observed_time_series,
seasonal_structure,
num_warmup_steps=50,
num_results=100,
seed=None):
"""Builds a seasonality-as-regression model and fits it by Gibbs sampling."""
with tf.name_scope('fit_seasonal_model_with_gibbs_sampling'):
observed_time_series = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
dtype = observed_time_series.time_series.dtype
design_matrix = seasonality_util.build_fixed_effects(
num_steps=ps.shape(observed_time_series.time_series)[-2],
seasonal_structure=seasonal_structure,
dtype=dtype)
# Default priors.
# pylint: disable=protected-access
one = tf.ones([], dtype=dtype)
level_variance_prior = tfd.InverseGamma(concentration=16,
scale=16. * 0.001**2 * one)
level_variance_prior._upper_bound = one
slope_variance_prior = tfd.InverseGamma(concentration=16,
scale=16. * 0.05**2 * one)
slope_variance_prior._upper_bound = 0.01 * one
observation_noise_variance_prior = tfd.InverseGamma(
concentration=0.05, scale=0.05 * one)
observation_noise_variance_prior._upper_bound = 1.2 * one
# pylint: enable=protected-access
model = gibbs_sampler.build_model_for_gibbs_fitting(
observed_time_series=observed_time_series,
design_matrix=design_matrix,
weights_prior=tfd.Normal(loc=0., scale=one),
level_variance_prior=level_variance_prior,
slope_variance_prior=slope_variance_prior,
observation_noise_variance_prior=observation_noise_variance_prior)
return [
model,
gibbs_sampler.fit_with_gibbs_sampling(model,
observed_time_series,
num_results=num_results,
num_warmup_steps=num_warmup_steps,
seed=seed)
] | c13d4df3eca25f1a53ed27cd94e5f2b4b102013c | 5,227 |
import numpy as np
import cupy as cp
from math import ceil, cos, radians, sin

def deskew(data, angle, dx, dz, rotate=True, return_resolution=True, out=None):
"""
Args:
data (ndarray): 3-D array to apply deskew
angle (float): angle between the objective and coverslip, in degree
dx (float): X resolution
dz (float): Z resolution
rotate (bool, optional): rotate and crop the output
return_resolution (bool, optional): return deskewed X/Z resolution
out (ndarray, optional): array to store the result
"""
angle = radians(angle)
# shift along X axis, in pixels
shift = dz * cos(angle) / dx
logger.debug(f"layer shift: {shift:.04f} px")
# estimate new size
nw, nv, nu = data.shape
nz, ny, nx = nw, nv, nu + ceil(shift * (nw - 1))
# upload texture
ch = ChannelFormatDescriptor(32, 0, 0, 0, runtime.cudaChannelFormatKindFloat)
arr = CUDAarray(ch, nu, nw)
res = ResourceDescriptor(runtime.cudaResourceTypeArray, cuArr=arr)
address_mode = (runtime.cudaAddressModeBorder, runtime.cudaAddressModeBorder)
tex = TextureDescriptor(
address_mode, runtime.cudaFilterModeLinear, runtime.cudaReadModeElementType
)
# transpose
data = np.swapaxes(data, 0, 1)
data = np.ascontiguousarray(data)
data_in = data.astype(np.float32)
data_out = cp.empty((ny, nz, nx), np.float32)
for i, layer in enumerate(data_in):
arr.copy_from(layer) # TODO use stream
texobj = TextureObject(res, tex)
kernels["shear_kernel"](
(ceil(nx / 16), ceil(nz / 16)),
(16, 16),
(data_out[i, ...], texobj, nx, nz, nu, np.float32(shift)),
)
data_out = cp.swapaxes(data_out, 0, 1)
data_out = cp.asnumpy(data_out)
data_out = data_out.astype(data.dtype)
if return_resolution:
# new resolution
dz *= sin(angle)
return data_out, (dz, dx)
else:
return data_out | a39ff1d48777c266e83358e272b6ba7d6d7ce894 | 5,228 |
import os

import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler
from tqdm import tqdm
def process_data(path,stage = 'train'):
"""
train
test
sample_submission
"""
# loading the data
df = pd.read_csv(os.path.join(path,f'{stage}.csv'))
MASK = -1 # fill NA with -1
T_HIST = 10 # time history, last 10 games
# for cols "date", change to datatime
for col in df.filter(regex='date', axis=1).columns:
df[col] = pd.to_datetime(df[col])
# Creating some feature engineering
print('processing hitorical data...')
for i in tqdm(range(1, 11)): # range from 1 to 10
# Feat. difference of days
df[f'home_team_history_match_DIFF_day_{i}'] = (df['match_date'] - df[f'home_team_history_match_date_{i}']).dt.days
df[f'away_team_history_match_DIFF_days_{i}'] = (df['match_date'] - df[f'away_team_history_match_date_{i}']).dt.days
# Feat. difference of scored goals
df[f'home_team_history_DIFF_goal_{i}'] = df[f'home_team_history_goal_{i}'] - df[f'home_team_history_opponent_goal_{i}']
df[f'away_team_history_DIFF_goal_{i}'] = df[f'away_team_history_goal_{i}'] - df[f'away_team_history_opponent_goal_{i}']
# Results: multiple nested where
df[f'home_team_result_{i}'] = np.where(df[f'home_team_history_DIFF_goal_{i}'] > 0, 1,
(np.where(df[f'home_team_history_DIFF_goal_{i}'] == 0, 0,
np.where(df[f'home_team_history_DIFF_goal_{i}'].isna(),np.nan, -1))))
df[f'away_team_result_{i}'] = np.where(df[f'away_team_history_DIFF_goal_{i}'] > 0, 1,
(np.where(df[f'away_team_history_DIFF_goal_{i}'] == 0, 0,
np.where(df[f'away_team_history_DIFF_goal_{i}'].isna(), np.nan, -1))))
# Feat. difference of rating ("modified" ELO RATING)
df[f'home_team_history_ELO_rating_{i}'] = 1 / (1 + 10 ** ((df[f'home_team_history_opponent_rating_{i}'] - df[f'home_team_history_rating_{i}']) / 10))
df[f'away_team_history_ELO_rating_{i}'] = 1 / (1 + 10 ** ((df[f'away_team_history_opponent_rating_{i}'] - df[f'away_team_history_rating_{i}']) / 10))
# df[f'away_team_history_DIFF_rating_{i}'] = - df[f'away_team_history_opponent_rating_{i}']
# Feat. same coach id
df[f'home_team_history_SAME_coaX_{i}'] = np.where(df['home_team_coach_id'] == df[f'home_team_history_coach_{i}'], 1, 0)
df[f'away_team_history_SAME_coaX_{i}'] = np.where(df['away_team_coach_id'] == df[f'away_team_history_coach_{i}'], 1, 0)
# Feat. same league id
#df[f'home_team_history_SAME_leaG_{i}'] = np.where(df['league_id'] == df[f'home_team_history_league_id_{i}'],1, 0)
#df[f'away_team_history_SAME_leaG_{i}'] = np.where(df['league_id'] == df[f'away_team_history_league_id_{i}'],1, 0)
# Fill NA with -1
print('done')
df.fillna(MASK, inplace=True)
# le = LabelEncoder()
# df['home_team_name'] = le.fit_transform(df['home_team_name'])
# df['away_team_name'] = le.fit_transform(df['away_team_name'])
# df['league_name'] = le.fit_transform(df['league_name'])
# save targets
# y_train = train[['target_int']].to_numpy().reshape(-1, 1)
id = df['id'].copy()
drop_list = ['id', 'target', 'home_team_name', 'away_team_name']
if stage =='train':
y = df['target'].copy()
drop_list.append('target')
else:
y = None
# keep only some features
df.drop(drop_list, axis=1, inplace=True)
df['is_cup'] = df['is_cup'].replace({True: 1, False: 0})
# Exclude all date, league, coach columns
df.drop(df.filter(regex='date').columns, axis=1, inplace=True)
df.drop(df.filter(regex='league').columns, axis=1, inplace=True)
df.drop(df.filter(regex='coach').columns, axis=1, inplace=True)
# Store feature names
feature_names = list(df.columns)
# Scale features using statistics that are robust to outliers
RS = RobustScaler()
df = RS.fit_transform(df)
# Back to pandas.dataframe
df = pd.DataFrame(df, columns=feature_names)
df = pd.concat([id, df], axis=1)
# Pivot dataframe to create an input array for the LSTM network
feature_groups = ["home_team_history_is_play_home", "home_team_history_is_cup",
"home_team_history_goal", "home_team_history_opponent_goal",
"home_team_history_rating", "home_team_history_opponent_rating",
"away_team_history_is_play_home", "away_team_history_is_cup",
"away_team_history_goal", "away_team_history_opponent_goal",
"away_team_history_rating", "away_team_history_opponent_rating",
"home_team_history_match_DIFF_day", "away_team_history_match_DIFF_days",
"home_team_history_DIFF_goal", "away_team_history_DIFF_goal",
"home_team_history_ELO_rating", "away_team_history_ELO_rating",
"home_team_history_SAME_coaX", "away_team_history_SAME_coaX",
"home_team_history_SAME_leaG", "away_team_history_SAME_leaG",
"home_team_result", "away_team_result"]
# Pivot dimension (id*features) x time_history
    x_pivot = pd.wide_to_long(df, stubnames=feature_groups, i='id', j='time', sep='_', suffix=r'\d+')
# Trying to keep the same id order
x = pd.merge(id, x_pivot, on="id")
x = x.drop(['id'], axis=1).to_numpy().reshape(-1, T_HIST, x_pivot.shape[-1])
return x,y | ff6542c8a4f7366c2a1b612d7b41e3aa539e34a4 | 5,229 |
import pandas as pd
import numpy as np
def rm_standard_dev(var,window):
"""
Smoothed standard deviation
"""
print('\n\n-----------STARTED: Rolling std!\n\n')
rollingstd = np.empty((var.shape))
for ens in range(var.shape[0]):
for i in range(var.shape[2]):
for j in range(var.shape[3]):
series = pd.Series(var[ens,:,i,j])
rollingstd[ens,:,i,j] = series.rolling(window).std().to_numpy()
newdata = rollingstd[:,window:,:,:]
print('-----------COMPLETED: Rolling std!\n\n')
return newdata | d37cfa3c756f8fc062a28ac078e4e16557282951 | 5,230 |
import os

import numpy as np
from scipy import ndimage

def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
image_index = 0
print(folder)
for image in os.listdir(folder):
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[image_index, :, :] = image_data
image_index += 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
num_images = image_index
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset | 30324858e8481b348f004dfbc39bf790cd0ca930 | 5,231 |
def visualizeTimeSeriesCategorization(dataName, saveDir, numberOfLagsToDraw=3, autocorrelationBased=True):
"""Visualize time series classification.
Parameters:
dataName: str
Data name, e.g. "myData_1"
saveDir: str
Path of directories pointing to data storage
numberOfLagsToDraw: boolean, Default 3
First top-N lags (or frequencies) to draw
autocorrelationBased: boolean, Default True
Whether autocorrelation or frequency based
Returns:
None
Usage:
        visualizeTimeSeriesCategorization('myData_1', '/dir1/dir2/')
"""
info = 'Autocorrelations' if autocorrelationBased else 'Periodograms'
def internal(className):
print('\n\n%s of Time Series:'%(className))
clusteringObject = dataStorage.read(saveDir + 'consolidatedGroupsSubgroups/' + dataName + '_%s_%s'%(className,info) + '_GroupsSubgroups')
if clusteringObject is None:
print('Clustering object not found')
return
print('Plotting Dendrogram with Heatmaps.')
visualizationFunctions.makeDendrogramHeatmapOfClusteringObject(clusteringObject, saveDir, dataName + '_%s_%sBased'%(className,info), AutocorrNotPeriodogr=autocorrelationBased)
return
for lag in range(1,numberOfLagsToDraw + 1):
internal('LAG%s'%(lag))
internal('SpikeMax')
internal('SpikeMin')
return None | b2fcac2179e3a689ee73e13519e2f4ad77c59037 | 5,232 |
from typing import Dict
def refund(payment_information: Dict, connection_params) -> Dict:
"""Refund a payment using the culqi client.
But it first check if the given payment instance is supported
by the gateway.
It first retrieve a `charge` transaction to retrieve the
payment id to refund. And return an error with a failed transaction
if the there is no such transaction, or if an error
from culqi occurs during the refund."""
error = check_payment_supported(payment_information=payment_information)
response_has_errors = False
if error:
response = get_error_response(
payment_information.amount, error=error)
else:
setup_client(**connection_params)
try:
payload = format_culqui_payload(
payment_information, TransactionKind.REFUND)
response = culqipy.Refund.create(payload)
print(f"DATA::response::{response}")
# Fix: get specific errors
except Exception as error:
response_has_errors = True
response = get_error_response(
payment_information.amount, error=error)
if not response_has_errors:
if response.get('object', None) == 'error':
error = response.get('user_message', None)
if error is None:
error = response.get('merchant_message', None)
if error is None:
                error = 'Unknown error!'
response = get_error_response(
payment_information.amount, error=error,
id=payment_information.token)
else:
clean_culqi_response(response)
return _generate_response(
payment_information=payment_information,
kind=TransactionKind.REFUND, data=response) | 75dff392c0748a1408eb801ad78ef65be988026c | 5,233 |
import argparse
import os
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Howler',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text',
metavar='str',
help='Input text or file')
parser.add_argument('-o',
'--outfile',
help='Output filename',
metavar='str',
type=str,
default='')
args = parser.parse_args()
    # If args.text is a file name (checked with os.path.isfile),
    if os.path.isfile(args.text):
        # read the file's contents and strip trailing whitespace.
        args.text = open(args.text).read().rstrip()
return args | 906e38ad510016068ca7d431c681b4680ee56f5d | 5,234 |
import torch
from typing import Tuple
def _ssim_map(
X: torch.Tensor,
Y: torch.Tensor,
data_range: float,
win: torch.Tensor,
K: Tuple[float, float] = (0.01, 0.03),
scales: Tuple[float, float, float] = (1, 1, 1),
gradient_based: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given two tensors it calculates the resulting SSIM and contrast sensitivity maps.
Args:
X (torch.Tensor): images
Y (torch.Tensor): images
data_range (float): value range of input images.
win (torch.Tensor): 1-D gauss kernel
K (Tuple[float,float]): stability constants (K1, K2). Defaults to (0.01, 0.03).
gradient_based (bool): whether or not to use gradient based ssim.
Returns:
torch.Tensor: SSIM map
torch.Tensor: contrast sensitivity map
References:
[1] Wang, Z., Bovik, A.C., Sheikh, H.R. and Simoncelli, E.P., 2004.
Image quality assessment: from error visibility to structural similarity.
IEEE transactions on image processing, 13(4), pp.600-612.
"""
K1, K2 = K
alpha, beta, gamma = scales
C1 = (K1 * data_range) ** 2
C2 = (K2 * data_range) ** 2
C3 = C2 / 2
win = win.to(X.device, dtype=X.dtype)
# TODO: Replace this with fftconvolution
mu1 = _gaussian_filter(X, win)
mu2 = _gaussian_filter(Y, win)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
print(f"mu1: {torch.isnan(mu1).any()}")
print(f"mu2: {torch.isnan(mu2).any()}")
print(f"mu1_sq: {torch.isnan(mu1_sq).any()}")
print(f"mu2_sq: {torch.isnan(mu2_sq).any()}")
print(f"mu1_mu2: {torch.isnan(mu1_mu2).any()}")
# Ref 1 - Sec 3.B - Eq 6
luminance = (2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)
print(f"Luminance: {torch.isnan(luminance).any()}")
if gradient_based:
X = _gradient_map(input=X)
Y = _gradient_map(input=Y)
mu1 = _gaussian_filter(X, win)
mu2 = _gaussian_filter(Y, win)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
# TODO: Understand why it is squared
sigma1_sq = _gaussian_filter(X * X, win) - mu1_sq
sigma2_sq = _gaussian_filter(Y * Y, win) - mu2_sq
sigma12 = _gaussian_filter(X * Y, win) - mu1_mu2
print(torch.min(sigma1_sq))
print(torch.min(sigma2_sq))
sigma1 = torch.sqrt(sigma1_sq)
sigma2 = torch.sqrt(sigma2_sq)
print(f"sigma1: {torch.isnan(sigma1).any()}")
print(f"sigma2: {torch.isnan(sigma2).any()}")
print(f"sigma12: {torch.isnan(sigma12).any()}")
print(f"sigma1_sq: {torch.isnan(sigma1_sq).any()}")
print(f"sigma2_sq: {torch.isnan(sigma2_sq).any()}")
# Ref 1 - Sec 3.B - Eq 9
contrast = (2 * sigma1 * sigma2 + C2) / (sigma1_sq + sigma2_sq + C2)
print(f"Contrast: {torch.isnan(contrast).any()}")
# Ref 1 - Sec 3.B - Eq 10
structure = (sigma12 + C3) / (sigma1 * sigma2 + C3)
print(f"Structure {torch.isnan(structure).any()}")
# Ref 1 - Sec 3.B - Eq 12
luminance = torch.pow(luminance, alpha)
contrast = torch.pow(contrast, beta)
structure = torch.pow(structure, gamma)
ssim_map = luminance * contrast * structure
return ssim_map, contrast | 3a1d34497228bb95d0bc295475fb0df38220107b | 5,235 |
import numbers
def check_random_state(seed):
"""Turn `seed` into a `np.random.RandomState` instance.
Parameters
----------
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
    If `seed` is an int, a new ``Generator`` instance is used,
    seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
Random number generator.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.default_rng(seed)
if isinstance(seed, (np.random.RandomState, np.random.Generator)):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState" " instance" % seed
) | 57390806329776c77977a27e18e78fdad298fef9 | 5,236 |
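A short usage sketch:

import numpy as np

rng = check_random_state(42)           # int -> a fresh numpy Generator seeded with 42
print(rng.integers(0, 10, size=3))
assert check_random_state(rng) is rng  # an existing Generator/RandomState is passed through
assert check_random_state(None) is np.random.mtrand._rand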
def power3_sum_2method():
"""
Input:
        nothing, it has everything it needs.
    Output:
        sum: sum of all numbers that are powers of 3
        and fall between 0 and the upper bound of 1000000
"""
k = 0
sum = 0
while True:
a = 3**k
k += 1
if a < 1000000:
sum += a
else:
break
return sum | b86bfaeb2418e183a78054d2a4b76c58d58be388 | 5,237 |
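A cross-check against the closed form of the geometric series (3^12 = 531441 is the largest power of 3 below the bound):

assert power3_sum_2method() == (3 ** 13 - 1) // 2 == 797161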
def bitwise_right_shift(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
"""
The BitwiseRightShift operation
The arguments for this function are as follows:
:param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
:param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
:param cellsize_type: one of "FirstOf", "MinOf", "MaxOf, "MeanOf", "LastOf"
:param astype: output pixel type
:return: the output raster
"""
return local(rasters, 15, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype) | 8d07a60a514466ee4aa0b15b0b442fb71b3347ed | 5,238 |
import re
def strip_comments(line):
"""Strips comments from a line and return None if the line is empty
or else the contents of line with leading and trailing spaces removed
and all other whitespace collapsed"""
commentIndex = line.find('//')
    if commentIndex == -1:
commentIndex = len(line)
line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
if line == '':
return None
else:
return line | 09579031294d7b5787c97fa81807fa5ecfe12329 | 5,239 |
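For example:

print(strip_comments("  int  x = 1 ;   // trailing comment"))  # 'int x = 1 ;'
print(strip_comments("// whole line comment"))                 # None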
import logging

import numpy as np
from astropy import units as u

def fft_pxscale(header, wave):
    """Compute conversion scale from telescope space to sky space.
    Parameters
    ----------
    header : dict-like
        FITS-style header providing 'NAXIS1' (grid size) and 'PIXSCALE' (pixel scale in meters/px).
    wave : str or Quantity
        Wavelength of the desired PSF.
    Returns
    -------
    fftscale : float
        The frequency scale in sky space.
    Example
    -------
    .. code-block:: python
        fftscale = fft_pxscale(header, wave)
    """
#size of the image. This should be taken from the header.
gridsize = header['NAXIS1']
#pixel scale of the image. This should be taken from the header.
pxscale_mod = header['PIXSCALE'] #in meters/px
#1D FFT of the gridsize.
fft_freq=np.fft.fftfreq(gridsize,pxscale_mod)
#wavelength of the desires psf. This is a input of the user, wavelength in microns
wave = (getQuantity(wave,recognized_units=UNITS['WAVE']))
lam = wave.to(u.m) #in meters
#re-orginizing the 1D FFT to match with the grid.
roll=np.floor(gridsize//2).astype("int")
freq = np.fft.fftshift(fft_freq)
##
## pxscale -> fftscale
fftscale=np.diff(freq)[0] ## cycles / mas per pixel in FFT image
mas2rad=np.deg2rad(1./3600000.) ## mas per rad
fftscale = fftscale/mas2rad * lam ## meters baseline per px in FFT image at a given wavelength
logging.info("Pixel scale in PSF image is: %g mas per pixel" % fftscale.value)
return fftscale.value | 6935bdefe96aec771704a79952cfc25ffb55e8bb | 5,240 |
def parse_git_submodules(gitmodules_data):
"""Parse a .gitmodules file to extract a { name -> url } map from it."""
result = {}
# NOTE: configparser.ConfigParser() doesn't seem to like the file
# (i.e. read_string() always returns None), so do the parsing
# manually here.
section_name = None
    is_submodule_section = False
submodule_name = None
submodule_prefix = 'submodule "'
urls = {}
branches = {}
for line in gitmodules_data.splitlines():
if line.startswith('['):
section_name = line[1:-1]
is_submodule_section = section_name.startswith(submodule_prefix)
if is_submodule_section:
submodule_name = section_name[len(submodule_prefix):-1]
elif is_submodule_section:
key, _, value = line.strip().partition('=')
if not value:
continue
key = key.strip()
value = value.strip()
if key == 'url':
urls[submodule_name] = value
elif key == 'branch':
branches[submodule_name] = value
result = {}
    for submodule, url in urls.items():
branch = branches.get(submodule)
if not branch:
branch = get_git_remote_ref(url, 'heads/master')
result[submodule] = '%s@%s' % (url, branch)
return result | 78d01ec70b68164189a2ea775c6084e256116d0a | 5,241 |
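An illustrative parse (the submodule name and URL are made up; a branch is given explicitly so the get_git_remote_ref fallback is not needed):

gitmodules = """
[submodule "vendor/lib"]
    path = vendor/lib
    url = https://example.com/lib.git
    branch = main
"""
print(parse_git_submodules(gitmodules))  # {'vendor/lib': 'https://example.com/lib.git@main'}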
import pathlib
from collections import defaultdict
from typing import Dict
import json
def get_model_cases(dir_path: pathlib.Path) -> Dict[str, Dict[str, str]]:
"""
Returns the Zen model case for each test if it exists.
:param dir_path: The path to the directory containing the DIFFERENCES directory.
"""
model_cases = defaultdict(dict) # type: Dict[str, Dict[str, str]]
queries_dir = dir_path / QUERIES
expected_res_dir = dir_path / QUERY_RESPONSES
tag_dir = None
if queries_dir.exists() and queries_dir.is_dir():
tag_dir = queries_dir
elif expected_res_dir.exists() and expected_res_dir.is_dir():
tag_dir = expected_res_dir
if isinstance(tag_dir, pathlib.Path):
for queries_file in tag_dir.iterdir():
with open(queries_file, 'r') as qf_fp:
queries_info = json.load(qf_fp)
for qinfo in queries_info:
if "ZenResponseTag" in qinfo:
query_str = qinfo["Query"]["Name"] + ":" +\
qinfo["Query"]["Type"]
model_cases[queries_file.stem][query_str] = qinfo["ZenResponseTag"]
return model_cases | d35b4cf59cf9b99a6aeb9e05e0af3ee342b11f3b | 5,242 |
def _format_date(event):
"""Returns formated date json object for event"""
old_date = event["date"]
term = event["term"]
dates = old_date.split("-")
if len(dates) == 1:
is_range = False
else:
is_range = True
is_range = (len(dates) > 1)
if is_range:
start_date = dates[0]
end_date = dates[-1]
else:
start_date = dates[0]
end_date = dates[0]
new_start_date = _format_date_string(start_date, term)
new_end_date = _format_date_string(end_date, term)
date = {
"start_date": new_start_date,
"end_date": new_end_date,
"range": is_range,
}
return date | aa8bf9a41fe30b664920e895cdc31d6993a408b2 | 5,243 |
def fetch(bibcode, filename=None, replace=None):
"""
Attempt to fetch a PDF file from ADS. If successful, then
add it into the database. If the fetch succeeds but the bibcode is
    not in the database, download the file to the current folder.
Parameters
----------
bibcode: String
ADS bibcode of entry to update.
filename: String
        Filename to assign to the PDF file. If None, get it from
        the guess_name() function.
    replace: Bool
If True, enforce replacing a PDF regardless of a pre-existing one.
If None (default), only ask when fetched PDF comes from arxiv.
Returns
-------
filename: String
If successful, return the full path of the file name.
If not, return None.
"""
arxiv = False
print('Fetching PDF file from Journal website:')
req = request_ads(bibcode, source='journal')
if req is None:
return
if req.status_code != 200:
print('Fetching PDF file from ADS website:')
req = request_ads(bibcode, source='ads')
if req is None:
return
if req.status_code != 200:
print('Fetching PDF file from ArXiv website:')
req = request_ads(bibcode, source='arxiv')
arxiv = True
if replace is None:
replace = False
if req is None:
return
if replace is None:
replace = True
if req.status_code == 200:
if bm.find(bibcode=bibcode) is None:
if filename is None:
filename = f'{bibcode}.pdf'
with builtin_open(filename, 'wb') as f:
f.write(req.content)
print(f"Saved PDF to: '{filename}'.\n"
"(Note that BibTex entry is not in the Bibmanager database)")
else:
filename = set_pdf(
bibcode, bin_pdf=req.content, filename=filename, arxiv=arxiv,
replace=replace)
return filename
print('Could not fetch PDF from any source.') | 7d264df3f0eab896a9cb4858e7b19e2590d8142b | 5,244 |
def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop multiple images.
Parameters
----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.crop``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if (h < hrg) or (w < wrg):
raise AssertionError("The size of cropping should smaller than or equal to the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg))
w_offset = int(np.random.uniform(0, w - wrg))
results = []
for data in x:
results.append(data[h_offset:hrg + h_offset, w_offset:wrg + w_offset])
return np.asarray(results)
else:
# central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
results = []
for data in x:
results.append(data[h_offset:h - h_offset, w_offset:w - w_offset])
return np.asarray(results) | 61593029455a880d5309e8343cf4f6d1049f598f | 5,245 |
def value_loss_given_predictions(value_prediction,
rewards,
reward_mask,
gamma,
epsilon,
value_prediction_old=None):
"""Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, RT+1, 1)
rewards: np.ndarray of shape (B, RT) of rewards.
reward_mask: np.ndarray of shape (B, RT), the mask over rewards.
gamma: float, discount factor.
epsilon: float, clip-fraction, used if value_value_prediction_old isn't None
value_prediction_old: np.ndarray of shape (B, RT+1, 1) of value predictions
using the old parameters. If provided, we incorporate this in the loss as
well. This is from the OpenAI baselines implementation.
Returns:
Pair (value_loss, summaries), where value_loss is the average L2 value loss,
averaged over instances where reward_mask is 1. Summaries is a dict of
summaries collected during value loss computation.
"""
B, RT = rewards.shape # pylint: disable=invalid-name
assert (B, RT) == reward_mask.shape
assert (B, RT + 1) == value_prediction.shape
value_prediction = value_prediction[:, :-1] * reward_mask # (B, RT)
r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, RT)
loss = (value_prediction - r2g)**2
# From the baselines implementation.
if value_prediction_old is not None:
value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, RT)
v_clipped = value_prediction_old + np.clip(
value_prediction - value_prediction_old, -epsilon, epsilon)
v_clipped_loss = (v_clipped - r2g)**2
loss = np.maximum(v_clipped_loss, loss)
# Take an average on only the points where mask != 0.
value_loss = np.sum(loss) / np.sum(reward_mask)
summaries = {
'value_loss': value_loss,
}
return (value_loss, summaries) | 5896dd57e1e9d05eb71e5b31aab4071b61d0fdbf | 5,246 |
def build_pkt(pkt):
"""Build and return a packet and eth type from a dict."""
def serialize(layers):
"""Concatenate packet layers and serialize."""
result = packet.Packet()
for layer in reversed(layers):
result.add_protocol(layer)
result.serialize()
return result
layers = []
assert 'eth_dst' in pkt and 'eth_src' in pkt
ethertype = None
if 'arp_source_ip' in pkt and 'arp_target_ip' in pkt:
ethertype = ether.ETH_TYPE_ARP
arp_code = pkt.get('arp_code', arp.ARP_REQUEST)
layers.append(arp.arp(
src_ip=pkt['arp_source_ip'],
dst_ip=pkt['arp_target_ip'],
opcode=arp_code))
elif 'ipv6_src' in pkt and 'ipv6_dst' in pkt:
ethertype = ether.ETH_TYPE_IPV6
if 'router_solicit_ip' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_SOLICIT))
elif 'neighbor_advert_ip' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_ADVERT,
data=icmpv6.nd_neighbor(
dst=pkt['neighbor_advert_ip'],
option=icmpv6.nd_option_sla(hw_src=pkt['eth_src']))))
elif 'neighbor_solicit_ip' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_SOLICIT,
data=icmpv6.nd_neighbor(
dst=pkt['neighbor_solicit_ip'],
option=icmpv6.nd_option_sla(hw_src=pkt['eth_src']))))
elif 'echo_request_data' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ICMPV6_ECHO_REQUEST,
data=icmpv6.echo(id_=1, seq=1, data=pkt['echo_request_data'])))
layers.append(ipv6.ipv6(
src=pkt['ipv6_src'],
dst=pkt['ipv6_dst'],
nxt=inet.IPPROTO_ICMPV6))
elif 'ipv4_src' in pkt and 'ipv4_dst' in pkt:
ethertype = ether.ETH_TYPE_IP
proto = inet.IPPROTO_IP
if 'echo_request_data' in pkt:
echo = icmp.echo(id_=1, seq=1, data=pkt['echo_request_data'])
layers.append(icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST, data=echo))
proto = inet.IPPROTO_ICMP
net = ipv4.ipv4(src=pkt['ipv4_src'], dst=pkt['ipv4_dst'], proto=proto)
layers.append(net)
elif 'actor_system' in pkt and 'partner_system' in pkt:
ethertype = ether.ETH_TYPE_SLOW
layers.append(slow.lacp(
version=1,
actor_system=pkt['actor_system'],
actor_port=1,
partner_system=pkt['partner_system'],
partner_port=1,
actor_key=1,
partner_key=1,
actor_system_priority=65535,
partner_system_priority=1,
actor_port_priority=255,
partner_port_priority=255,
actor_state_defaulted=0,
partner_state_defaulted=0,
actor_state_expired=0,
partner_state_expired=0,
actor_state_timeout=1,
partner_state_timeout=1,
actor_state_collecting=1,
partner_state_collecting=1,
actor_state_distributing=1,
partner_state_distributing=1,
actor_state_aggregation=1,
partner_state_aggregation=1,
actor_state_synchronization=pkt['actor_state_synchronization'],
partner_state_synchronization=1,
actor_state_activity=0,
partner_state_activity=0))
elif 'chassis_id' in pkt and 'port_id' in pkt:
ethertype = ether.ETH_TYPE_LLDP
return valve_packet.lldp_beacon(
pkt['eth_src'], pkt['chassis_id'], str(pkt['port_id']), 1,
org_tlvs=pkt.get('org_tlvs', None),
system_name=pkt.get('system_name', None))
assert ethertype is not None, pkt
if 'vid' in pkt:
tpid = ether.ETH_TYPE_8021Q
layers.append(vlan.vlan(vid=pkt['vid'], ethertype=ethertype))
else:
tpid = ethertype
eth = ethernet.ethernet(
dst=pkt['eth_dst'],
src=pkt['eth_src'],
ethertype=tpid)
layers.append(eth)
result = serialize(layers)
return result | afd84446d3bb545b03b9d4c42d80f096b6665342 | 5,247 |
def make_file_prefix(run, component_name):
"""
Compose the run number and component name into string prefix
to use with filenames.
"""
return "{}_{}".format(component_name, run) | 73ef37d75d9e187ee49ee058958c3b8701185585 | 5,248 |
def identifier_needs_escaping(text):
"""
Slightly slow, but absolutely correct determination if a given symbol _must_ be escaped.
Necessary when you might be generating column names that could be a reserved keyword.
>>> identifier_needs_escaping("my_column")
False
>>> identifier_needs_escaping("my_column3424")
False
>>> identifier_needs_escaping("my column with spaces")
True
>>> identifier_needs_escaping("mycolumn;")
True
>>> identifier_needs_escaping("SELECT")
True
>>> identifier_needs_escaping("my_column.blah")
True
>>> identifier_needs_escaping("UPDATE")
True
>>> identifier_needs_escaping("column ")
True
"""
# TODO: Replace with custom caching decorator?
global _ident_needs_escaping_cache
if text not in _ident_needs_escaping_cache:
try:
ast = sql_subexpr_ast(text, "identifier")
_ident_needs_escaping_cache[text] = not (
isinstance(ast, Identifier) and ast.text == text
)
except Exception as e:
_ident_needs_escaping_cache[text] = True
return _ident_needs_escaping_cache[text] | 265f7acd1e92a954758f44eb03247b0b935d6d4d | 5,249 |
from typing import Dict

from yaml import FullLoader, load

def initialize_lock_and_key_ciphers() -> Dict[str, VigenereCipher]:
    """Load and return the Vigenère ciphers defined in the cipher resource file.
    Returns:
        Dict[str, VigenereCipher]: mapping of cipher key name to its cipher."""
ciphers = {}
with open(CIPHER_RESOURCE, "r") as cipher_resource_file:
cipher_data = load(cipher_resource_file, Loader=FullLoader)
for cipher_key_name, cipher_keys in cipher_data.items():
ciphers[cipher_key_name] = VigenereCipher(key=cipher_keys['key'], alphabet=cipher_keys['alphabet'])
return ciphers | 1c0a27b36b4c0524b77dcb5c44a3bc840797b226 | 5,250 |
def add_service():
"""
Used to register a new service
"""
form = ServiceForm()
if form.validate_on_submit():
try:
srv = Services()
srv.populate_from_form(form)
srv.authentication.value = {"db":request.form.get('authdb'),"user":request.form.get('authuser'),"pswd":request.form.get("authpass")}
srv.save()
flash('Datele au fost adaugate!', category='alert-success')
return redirect(url_for('services.list_services'))
except Exception as err:
flash('Datele nu pot fi adaugate!', category='alert-danger')
return render_template('services/settings/add.html', pagetitle='Adauga serviciu', form=form) | 56ce52c293d42710a9d4d5ac57b21f5ba1c0c0ac | 5,251 |
def f_columnas_pips(datos):
    """
    Parameters
    ----------
    datos : pandas.DataFrame : df with information on transactions executed in Oanda,
    after having run f_columnas_tiempos
    Returns
    -------
    datos : pandas.DataFrame : modified df
    Debugging
    -------
    datos = f_leer_archivo("archivo_tradeview_1.csv")
    """
datos['pips'] = [(datos.closeprice[i]-datos.openprice[i])*f_pip_size(datos.symbol[i]) for i in range(len(datos))]
datos['pips'][datos.type=='sell'] *= -1
datos['pips_acm'] = datos.pips.cumsum()
datos['profit_acm'] = datos['profit'].cumsum()
return datos.copy() | 5d6d47d23dbe16f3619b7e1264d30e91a9acd8ce | 5,252 |
def parse_resolution(resolution):
"""
return: width, height, resolution
"""
resolution = resolution.strip()
splits = resolution.split(',')
return int(splits[0]), int(splits[1]), int(splits[2]) | de937e440c4540d11cedd868e3f4a046baa99f22 | 5,253 |
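For example:

print(parse_resolution(" 1920, 1080, 72 "))  # (1920, 1080, 72)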
def link_cube(cube, locale, provider=None, namespace=None,
ignore_missing=False):
"""Links dimensions to the `cube` in the `context` object. The `context`
object should implement a function `dimension(name, locale, namespace,
provider)`. Modifies cube in place, returns the cube.
"""
# TODO: change this to: link_cube(cube, locale, namespace, provider)
# Assumption: empty cube
linked = set()
for dim_name in list(cube.dimension_links.keys()):
if dim_name in linked:
raise ModelError("Dimension '{}' linked twice"
.format(dim_name))
try:
dim = find_dimension(dim_name, locale,
provider=provider,
namespace=namespace)
except TemplateRequired as e:
raise ModelError("Dimension template '%s' missing" % dim_name)
if not dim and not ignore_missing:
raise CubesError("Dimension '{}' not found.".format(dim_name))
cube.link_dimension(dim)
return cube | 09062ff3fd9dcfeeac7a746557c7f5384e4560a6 | 5,254 |
import argparse
def _parser() -> argparse.Namespace:
"""Take care of all the argparse stuff.
:returns: the args
"""
# parser = GooeyParser(description='Remove : from data files')
parser = argparse.ArgumentParser(description='Combines Nods using ')
parser.add_argument('listspectra', help='List of spectra to combine.', default=False)
parser.add_argument('-o', "--optimal-nods", help="Optimal nod bool matrix file.")
parser.add_argument("-s", "--spectralcoords", default=False, action="store_true",
help="Turn spectra into spectral coordinates first before adding. Default=False")
parser.add_argument("-n", "--nod_num", help="Number of nods in the nod cycle, default=8", default=8, type=int)
parser.add_argument("-c", "--combination", help="Nod combination method, default=all means do all three.",
default="all", choices=["all", "optimal", "non-opt", "mix"])
parser.add_argument("-u", "--unnorm", help="Combine the un-normalized nods.", action="store_true")
parser.add_argument("--snr", help="Show snr of continuum.", action="store_true")
parser.add_argument("-p", "--plot", help="Show the plots.", action="store_true")
parser.add_argument("--output_verify", help="Fits file verification mode", default="fix+warn")
parser.add_argument("-r", "--overwrite", help="Overwrite output file if already exists", action="store_true")
args = parser.parse_args()
return args | 3edcefc24898d15fd67925729590710a4f0d1fb5 | 5,255 |
import inspect
def get_arguments(func):
"""Returns list of arguments this function has."""
    if hasattr(func, '__code__'):
        # Regular function.
        return inspect.getfullargspec(func).args
    elif hasattr(func, '__call__'):
        # Callable object.
        print(func)
        return get_arguments(func.__call__)
    elif hasattr(func, 'func'):
        # Partial function.
        return get_arguments(func.func) | f93133f20c819c590c30e25b6c339c07732daebe | 5,256 |
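A quick illustration of both branches (relies on the function above):

def example(a, b, c=3):
    return a + b + c

print(get_arguments(example))   # ['a', 'b', 'c']

class Adder:
    def __call__(self, x, y):
        return x + y

print(get_arguments(Adder()))   # ['self', 'x', 'y']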
def _check(isamAppliance, name):
"""
Check if suffix exists
"""
ret_obj = get(isamAppliance)
check_value, warnings = False, ret_obj['warnings']
if warnings == []:
for suffix in ret_obj['data']:
if suffix['name'] == name:
logger.info("Suffix found in embedded ldap: " + name)
check_value = True
return check_value, suffix['id'], warnings
logger.info("Suffix *not* found in embedded ldap: " + name)
return check_value, None, warnings | be2a6226ebdccb92ec3361df79e50165a22d6981 | 5,257 |
def check_listening_address(address: str) -> bool:
"""Check entered ip address for validity."""
if address == 'localhost':
return True
return address in get_local_addresses() | eaa5cecfee4e8be2947150a537213f4159ee6baf | 5,258 |
import base64
def multibase_b64decode(data):
"""
Follow forge's base64 urlsafe encode convention to decode string
Args:
data(string): encoded string
Returns: bytes
Examples:
>>> multibase_b64decode('aGVsbG8')
b'hello'
"""
if isinstance(data, str):
data = data.encode()
return base64.urlsafe_b64decode(
(data + b'=' * (-len(data) % 4))) | fdbc0f937e33d7994737a3a515973598cac3debd | 5,259 |
from typing import List
def parse_ordering_params(param: List[str]) -> List[str]:
"""
Ignores the request to sort by "ord".
Returns a sorting order based on the params and includes "readable_id"
sorting in passed params if the sorting request contains title
otherwise, it returns the requested order.
"""
if "ord" in param:
order = []
elif "title" in param:
prefix = "-" if param[0] == "-" else ""
order = ["{prefix}coursepage__course__readable_id".format(prefix=prefix), param]
else:
order = [param]
return order | a6a5f4665515a292ad2367945a6b8407000d656a | 5,260 |
def file_senzing_rabbitmq():
"""#!/usr/bin/env bash
# --- Functions ---------------------------------------------------------------
function up {
echo -ne "\033[2K${CONTAINER_NAME} status: starting...\r"
mkdir -p ${RABBITMQ_DIR}
chmod 777 ${RABBITMQ_DIR}
if [ "${CONTAINER_VERSION}" == "latest" ]
then
${SENZING_SUDO} docker pull ${SENZING_DOCKER_REGISTRY_URL}/bitnami/rabbitmq:${CONTAINER_VERSION} >> ${CONTAINER_LOG} 2>&1
fi
${SENZING_SUDO} docker run \\
--detach \\
--env RABBITMQ_PASSWORD=${SENZING_RABBITMQ_PASSWORD} \\
--env RABBITMQ_USERNAME=${SENZING_RABBITMQ_USERNAME} \\
--interactive \\
--name ${CONTAINER_NAME} \\
--publish ${CONTAINER_PORT}:15672 \\
--publish ${SENZING_DOCKER_PORT_RABBITMQ}:5672 \\
--restart always \\
--tty \\
--volume ${RABBITMQ_DIR}:/bitnami \\
${SENZING_DOCKER_RUN_PARAMETERS_GLOBAL} \\
${SENZING_DOCKER_RUN_PARAMETERS_RABBITMQ} \\
${SENZING_NETWORK_PARAMETER} \\
${SENZING_PRIVILEGED_PARAMETER} \\
bitnami/rabbitmq:${CONTAINER_VERSION} \\
>> ${CONTAINER_LOG} 2>&1
COUNTER=0
COUNTER_NOTICE=5
TIME_STRING=".."
CONTAINER_STATUS="$( docker container inspect -f '{{.State.Status}}' ${CONTAINER_NAME})"
while [ "${CONTAINER_STATUS}" != "running" ]; do
COUNTER=$((${COUNTER}+1))
if [ "${COUNTER}" -eq "${COUNTER_NOTICE}" ]; then
echo -ne "\033[2K"
echo ""
echo "To see what is happening behind-the-scenes, view the log at"
echo "${CONTAINER_LOG}"
echo "and/or run 'docker logs ${CONTAINER_NAME}'"
echo ""
fi
TIME_STRING="${TIME_STRING}."
echo -ne "\033[2K${CONTAINER_NAME} status: ${CONTAINER_STATUS}${TIME_STRING}\r"
sleep 5
CONTAINER_STATUS="$( docker container inspect -f '{{.State.Status}}' ${CONTAINER_NAME})"
done
sleep 10
echo "${SENZING_HORIZONTAL_RULE}"
echo "${SENZING_HORIZONTAL_RULE:0:2} ${CONTAINER_NAME} running on http://${SENZING_DOCKER_HOST_IP_ADDR}:${CONTAINER_PORT}"
echo "${SENZING_HORIZONTAL_RULE:0:2} Username: ${SENZING_RABBITMQ_USERNAME} Password: ${SENZING_RABBITMQ_PASSWORD}"
echo "${SENZING_HORIZONTAL_RULE:0:2} Mount information: (Format: in container > on host)"
echo "${SENZING_HORIZONTAL_RULE:0:2} /bitnami > ${RABBITMQ_DIR}"
echo "${SENZING_HORIZONTAL_RULE:0:2} Logs:"
echo "${SENZING_HORIZONTAL_RULE:0:2} ${CONTAINER_LOG}"
echo "${SENZING_HORIZONTAL_RULE:0:2} and/or run 'docker logs ${CONTAINER_NAME}'"
echo "${SENZING_HORIZONTAL_RULE:0:2} For more information:"
echo "${SENZING_HORIZONTAL_RULE:0:2} ${SENZING_REFERENCE_URL}#senzing-rabbitmq"
echo "${SENZING_HORIZONTAL_RULE}"
}
function down {
${SENZING_SUDO} docker stop ${CONTAINER_NAME} >> ${CONTAINER_LOG} 2>&1
${SENZING_SUDO} docker rm ${CONTAINER_NAME} >> ${CONTAINER_LOG} 2>&1
}
function usage {
echo "usage: $0 [up | down | restart]"
echo "For more information:"
echo "${SENZING_REFERENCE_URL}#senzing-rabbitmq"
}
# --- Main --------------------------------------------------------------------
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source ${SCRIPT_DIR}/docker-environment-vars.sh
CONTAINER_LOG="${SENZING_LOG_RABBITMQ}"
CONTAINER_NAME="${SENZING_DOCKER_CONTAINER_NAME_RABBITMQ}"
CONTAINER_PORT="${SENZING_DOCKER_PORT_RABBITMQ_UI}"
CONTAINER_VERSION="${SENZING_DOCKER_IMAGE_VERSION_RABBITMQ}"
if [ "$1" == "up" ]; then
up
elif [ "$1" == "down" ]; then
down
elif [ "$1" == "restart" ]; then
down
up
else
usage
fi
"""
return 0 | 95396425074096d17561b20cd197e77f1d550476 | 5,261 |
def mse(predictions, targets):
"""Calculate MSE: (Mean squared error)
"""
return ((predictions - targets) ** 2).mean() | 79d87a3422d4d24201cae86ee861614c83f6770f | 5,262 |
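# Illustrative check (added; assumes NumPy arrays as inputs, which the snippet
# implies via .mean() but does not import itself):
def _example_mse():
    import numpy as np
    preds = np.array([1.0, 2.0, 3.0])
    targets = np.array([1.0, 2.0, 5.0])
    assert abs(mse(preds, targets) - 4.0 / 3.0) < 1e-9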
def export1d(hist):
"""Export a 1-dimensional `Hist` object to uproot
This allows one to write a coffea histogram into a ROOT file, via uproot.
Parameters
----------
hist : Hist
A 1-dimensional histogram object
Returns
-------
out
A ``uproot_methods.classes.TH1`` object
Examples
--------
Creating a coffea histogram, filling, and writing to a file::
import coffea, uproot, numpy
h = coffea.hist.Hist("Events", coffea.hist.Bin("var", "some variable", 20, 0, 1))
h.fill(var=numpy.random.normal(size=100))
fout = uproot.create('output.root')
fout['myhist'] = coffea.hist.export1d(h)
fout.close()
"""
if hist.dense_dim() != 1:
raise ValueError("export1d() can only support one dense dimension")
if hist.sparse_dim() != 0:
raise ValueError("export1d() expects zero sparse dimensions")
axis = hist.axes()[0]
sumw, sumw2 = hist.values(sumw2=True, overflow='all')[()]
edges = axis.edges(overflow='none')
out = TH1.__new__(TH1)
out._fXaxis = TAxis(len(edges) - 1, edges[0], edges[-1])
out._fXaxis._fName = axis.name
out._fXaxis._fTitle = axis.label
if not axis._uniform:
out._fXaxis._fXbins = edges.astype(">f8")
centers = (edges[:-1] + edges[1:]) / 2.0
out._fEntries = out._fTsumw = out._fTsumw2 = sumw[1:-1].sum()
out._fTsumwx = (sumw[1:-1] * centers).sum()
out._fTsumwx2 = (sumw[1:-1] * centers**2).sum()
out._fName = "histogram"
out._fTitle = hist.label
out._classname = b"TH1D"
out.extend(sumw.astype(">f8"))
out._fSumw2 = sumw2.astype(">f8")
return out | ffe09495a268c68d26f9861e6d732649f2f74497 | 5,263 |
def filter_words(w_map, emb_array, ck_filenames):
""" delete word in w_map but not in the current corpus """
vocab = set()
for filename in ck_filenames:
for line in open(filename, 'r'):
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
assert len(line) >= 3, 'wrong ck file format'
word = line[0]
vocab.add(word)
word = word.lower()
vocab.add(word)
new_w_map = {}
new_emb_array = []
for (word, idx) in w_map.items():
if word in vocab or word in ['<unk>', '<s>', '< >', '<\n>']:
assert word not in new_w_map
new_w_map[word] = len(new_emb_array)
new_emb_array.append(emb_array[idx])
print('filtered %d --> %d' % (len(emb_array), len(new_emb_array)))
return new_w_map, new_emb_array | efdef92093acf25c992dba86da25a4118ba728ec | 5,264 |
def get_cache_template(sources, grids, geopackage, table_name="tiles"):
"""
Returns the cache template which is "controlled" settings for the application.
The intent is to allow the user to configure certain things but impose specific behavior.
:param sources: A name for the source
:param grids: specific grid for the data source
:param geopackage: Location for the geopackage
:return: The dict template
"""
if sources == ["None"]:
sources = []
return {
"sources": sources,
"cache": {"type": "geopackage", "filename": str(geopackage), "table_name": table_name},
"grids": [grid for grid in grids if grid == "default"] or grids,
"format": "mixed",
"request_format": "image/png",
} | dc83a155d28e0b39f12a7dc7142b61a4bf27512b | 5,265 |
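# Usage sketch (added; the source name, grid names and geopackage path below
# are illustrative assumptions, not values from the original project):
def _example_get_cache_template():
    template = get_cache_template(
        ["osm"], ["default", "webmercator"], "/tmp/example.gpkg", table_name="tiles"
    )
    assert template["cache"]["type"] == "geopackage"
    assert template["grids"] == ["default"]  # "default" wins when present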
import datetime
import numpy as np
import pandas as pd
import psycopg2.extras
import matplotlib.pyplot as plt
import matplotlib.colors as mpcolors
from scipy import stats
from pyiem import network
from pyiem.util import get_autoplot_context, get_dbconn
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ccursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
lagmonths = ctx['lag']
months = ctx['months']
month = ctx['month']
highyears = [int(x) for x in ctx['year'].split(",")]
h = ctx['h']
wantmonth = month + lagmonths
yearoffset = 0
if month + lagmonths < 1:
wantmonth = 12 - (month + lagmonths)
yearoffset = 1
wanted = []
deltas = []
for m in range(month, month+months):
if m < 13:
wanted.append(m)
deltas.append(0)
else:
wanted.append(m-12)
deltas.append(-1)
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
elnino = {}
ccursor.execute("""SELECT monthdate, soi_3m, anom_34 from elnino""")
for row in ccursor:
if row[0].month != wantmonth:
continue
elnino[row[0].year + yearoffset] = dict(soi_3m=row[1], anom_34=row[2])
ccursor.execute("""
SELECT year, month, sum(precip), avg((high+low)/2.)
from """ + table + """
where station = %s GROUP by year, month
""", (station, ))
yearly = {}
for row in ccursor:
(_year, _month, _precip, _temp) = row
if _month not in wanted:
continue
effectiveyear = _year + deltas[wanted.index(_month)]
nino = elnino.get(effectiveyear, {}).get('soi_3m', None)
if nino is None:
continue
data = yearly.setdefault(effectiveyear, dict(precip=0, temp=[],
nino=nino))
data['precip'] += _precip
data['temp'].append(float(_temp))
fig = plt.figure(figsize=(10, 6))
ax = plt.axes([0.1, 0.12, 0.5, 0.75])
msg = ("[%s] %s\n%s\n%s SOI (3 month average)"
) % (station, nt.sts[station]['name'], title(wanted),
datetime.date(2000, wantmonth, 1).strftime("%B"))
ax.set_title(msg)
cmap = plt.get_cmap("RdYlGn")
zdata = np.arange(-2.0, 2.1, 0.5)
norm = mpcolors.BoundaryNorm(zdata, cmap.N)
rows = []
xs = []
ys = []
for year in yearly:
x = yearly[year]['precip']
y = np.average(yearly[year]['temp'])
xs.append(x)
ys.append(y)
val = yearly[year]['nino']
c = cmap(norm([val])[0])
if h == 'hide' and val > -0.5 and val < 0.5:
ax.scatter(x, y, facecolor='#EEEEEE', edgecolor='#EEEEEE', s=30,
zorder=2, marker='s')
else:
ax.scatter(x, y, facecolor=c, edgecolor='k', s=60, zorder=3,
marker='o')
if year in highyears:
ax.text(x, y + 0.2, "%s" % (year, ), ha='center', va='bottom',
zorder=5)
rows.append(dict(year=year, precip=x, tmpf=y, soi3m=val))
ax.axhline(np.average(ys), lw=2, color='k', linestyle='-.', zorder=2)
ax.axvline(np.average(xs), lw=2, color='k', linestyle='-.', zorder=2)
sm = plt.cm.ScalarMappable(norm, cmap)
sm.set_array(zdata)
cb = plt.colorbar(sm, extend='both')
cb.set_label("<-- El Nino :: SOI :: La Nina -->")
ax.grid(True)
ax.set_xlim(left=-0.01)
ax.set_xlabel("Total Precipitation [inch], Avg: %.2f" % (np.average(xs),))
ax.set_ylabel((r"Average Temperature $^\circ$F, "
"Avg: %.1f") % (np.average(ys), ))
df = pd.DataFrame(rows)
ax2 = plt.axes([0.67, 0.6, 0.28, 0.35])
ax2.scatter(df['soi3m'].values, df['tmpf'].values)
ax2.set_xlabel("<-- El Nino :: SOI :: La Nina -->")
ax2.set_ylabel(r"Avg Temp $^\circ$F")
slp, intercept, r_value, _, _ = stats.linregress(df['soi3m'].values,
df['tmpf'].values)
y1 = -2.0 * slp + intercept
y2 = 2.0 * slp + intercept
ax2.plot([-2, 2], [y1, y2])
ax2.text(0.97, 0.9, "R$^2$=%.2f" % (r_value**2, ),
ha='right', transform=ax2.transAxes, bbox=dict(color='white'))
ax2.grid(True)
ax3 = plt.axes([0.67, 0.1, 0.28, 0.35])
ax3.scatter(df['soi3m'].values, df['precip'].values)
ax3.set_xlabel("<-- El Nino :: SOI :: La Nina -->")
ax3.set_ylabel("Total Precip [inch]")
slp, intercept, r_value, _, _ = stats.linregress(df['soi3m'].values,
df['precip'].values)
y1 = -2.0 * slp + intercept
y2 = 2.0 * slp + intercept
ax3.plot([-2, 2], [y1, y2])
ax3.text(0.97, 0.9, "R$^2$=%.2f" % (r_value**2, ),
ha='right', transform=ax3.transAxes, bbox=dict(color='white'))
ax3.grid(True)
return fig, df | 0f41a53336f2bf65805adaf83a8f3f17c006e161 | 5,266 |
def _action_spec():
"""Returns the action spec."""
paddle_action_spec = dm_env_rpc_pb2.TensorSpec(
dtype=dm_env_rpc_pb2.INT8, name=_ACTION_PADDLE)
tensor_spec_utils.set_bounds(
paddle_action_spec,
minimum=np.min(_VALID_ACTIONS),
maximum=np.max(_VALID_ACTIONS))
return {1: paddle_action_spec} | 130b7b2fe9f56d925d4ec1206eb3fb2752fee716 | 5,267 |
def stdin(sys_stdin):
"""
Imports standard input.
"""
inputs = [x.strip("[]\n") for x in sys_stdin]
a = [int(x) for x in inputs[0].split(",")]
x = int(inputs[1][0])
return a, x | 4c34e1bc80da31c6c7aff0d71a0c65f6fc01ed00 | 5,268 |
def _row_key(row):
"""
:param row: a normalized row from STATEMENT_METRICS_QUERY
:return: a tuple uniquely identifying this row
"""
return row['database_name'], row['user_name'], row['query_signature'], row['query_hash'], row['query_plan_hash'] | 2984e0e0b5fcc4e51a26af188e51fe65c52077a2 | 5,269 |
from io import BytesIO
import pycurl
def get(url, user_agent=UA, referrer=None):
    """Make a GET request of the url using pycurl and return the data
    (which is None if unsuccessful)."""
    data = None
    databuffer = BytesIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.CONNECTTIMEOUT, 5)
curl.setopt(pycurl.TIMEOUT, 8)
curl.setopt(pycurl.WRITEFUNCTION, databuffer.write)
curl.setopt(pycurl.COOKIEFILE, '')
if user_agent:
curl.setopt(pycurl.USERAGENT, user_agent)
if referrer is not None:
curl.setopt(pycurl.REFERER, referrer)
try:
curl.perform()
data = databuffer.getvalue()
except Exception:
pass
curl.close()
return data | e18de239a598be249d81c2a15486a66af763bc85 | 5,270 |
def detect_callec(tree):
"""Collect names of escape continuations from call_ec invocations in tree.
Currently supported and unsupported cases::
# use as decorator, supported
@call_ec
def result(ec): # <-- we grab name "ec" from here
...
# use directly on a literal lambda, supported
result = call_ec(lambda ec: ...) # <-- we grab name "ec" from here
# use as a function, **NOT supported**
def g(ec): # <-- should grab from here
...
...
result = call_ec(g) # <-- but this is here; g could be in another module
"""
# literal function names that are always interpreted as an ec.
# "brk" is needed to combo with unpythonic.fploop.breakably_looped.
fallbacks = ["ec", "brk"]
iscallec = partial(isx, make_isxpred("call_ec"))
@Walker
def detect(tree, *, collect, **kw):
# TODO: add support for general use of call_ec as a function (difficult)
if type(tree) in (FunctionDef, AsyncFunctionDef) and any(iscallec(deco) for deco in tree.decorator_list):
fdef = tree
collect(fdef.args.args[0].arg) # FunctionDef.arguments.(list of arg objects).arg
elif is_decorated_lambda(tree, mode="any"):
decorator_list, thelambda = destructure_decorated_lambda(tree)
if any(iscallec(decocall.func) for decocall in decorator_list):
collect(thelambda.args.args[0].arg) # we assume it's the first arg, as that's what call_ec expects.
return tree
return fallbacks + detect.collect(tree) | 1980c2abd9d5b995a47eab381eb595eb71ced595 | 5,271 |
from typing import List
from typing import Tuple
from typing import Any
def apply_filters(
stream: StreamMeta, filters: List[Tuple[str, str]], config: Any
) -> StreamMeta:
"""Apply enabled filters ordered by priority on item"""
filter_pool = get_filter_pool(filters, config)
for filter_instance in filter(
lambda x: x.enabled, sorted(filter_pool, key=lambda x: x.priority)
):
filter_instance.apply(stream)
return stream | 2ff50b5d31e84ba69afe694b4beb4116dbc5fc55 | 5,272 |
from functools import wraps
from threading import Thread
def threading_d(func):
"""
A decorator to run function in background on thread
Args:
func:``function``
Function with args
Return:
background_thread: ``Thread``
"""
@wraps(func)
def wrapper(*args, **kwags):
background_thread = Thread(target=func, args=(*args,))
background_thread.daemon = True
background_thread.start()
return background_thread
return wrapper | ff4d86ded189737d68d4cdc98c0e9ba9f1a28664 | 5,273 |
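# Usage sketch (added; the worker below is illustrative only). Decorating a
# function makes each call return the daemon Thread object immediately.
@threading_d
def _example_worker(n):
    return sum(range(n))
# thread = _example_worker(1000)  # runs in the background; thread.join() to wait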
import numpy as np
def create_anchors_3d_stride(
feature_size,
sizes=[1.6, 3.9, 1.56],
anchor_strides=[0.4, 0.4, 0.0],
anchor_offsets=[0.2, -39.8, -1.78],
rotations=[0, np.pi / 2],
velocities=[],
dtype=np.float32,
):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
# almost 2x faster than v1
x_stride, y_stride, z_stride = anchor_strides
x_offset, y_offset, z_offset = anchor_offsets
z_centers = np.arange(feature_size[0], dtype=dtype)
y_centers = np.arange(feature_size[1], dtype=dtype)
x_centers = np.arange(feature_size[2], dtype=dtype)
z_centers = z_centers * z_stride + z_offset
y_centers = y_centers * y_stride + y_offset
x_centers = x_centers * x_stride + x_offset
sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
rotations = np.array(rotations, dtype=dtype)
velocities = np.array(velocities, dtype=dtype).reshape([-1, 2])
combines = np.hstack([sizes, velocities]).reshape([-1, 5])
rets = np.meshgrid(x_centers, y_centers, z_centers, rotations, indexing="ij")
tile_shape = [1] * 5
tile_shape[-2] = int(sizes.shape[0])
for i in range(len(rets)):
rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
rets[i] = rets[i][..., np.newaxis] # for concat
# sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
combines = np.reshape(combines, [1, 1, 1, -1, 1, 5])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
# sizes = np.tile(sizes, tile_size_shape)
combines = np.tile(combines, tile_size_shape)
# rets.insert(3, sizes)
rets.insert(3, combines)
ret = np.concatenate(rets, axis=-1)
return np.transpose(ret, [2, 1, 0, 3, 4, 5]) | 6834d20f44196f5dad19d1917a673196334adf9f | 5,274 |
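# Shape check (added for illustration). Note the velocities argument appears
# to need one row per anchor size; the feature_size and sizes values here are
# arbitrary assumptions.
def _example_anchor_shape():
    anchors = create_anchors_3d_stride(
        feature_size=[1, 2, 4],          # (D, H, W)
        sizes=[[1.6, 3.9, 1.56]],
        velocities=[[0.0, 0.0]],
    )
    # (D, H, W, num_sizes, num_rotations, x+y+z + size+velocity + rotation)
    assert anchors.shape == (1, 2, 4, 1, 2, 9)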
import hashlib
def sha1_file(filename):
"""
Return the hex string representation of the SHA1 checksum of the filename
"""
s = hashlib.sha1()
with open(filename, "rb") as f:
for line in f:
s.update(line)
return s.hexdigest() | b993ac9f025d69124962905f87b1968617bb33f5 | 5,275 |
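# Usage sketch (added; writes a throwaway file under a temporary directory):
def _example_sha1_file():
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), "data.bin")
    with open(path, "wb") as f:
        f.write(b"hello")
    assert sha1_file(path) == hashlib.sha1(b"hello").hexdigest()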
def unit_parameters(_bb_spine_db_export: dict, _grid_name: str, _node_name: str, _unit_name: str, _time_index,
_alternative='Base', _eff_level=1, _p_unit=False,
_node_name_if_output=None, _node_name_if_input=None
):
"""
:param _bb_spine_db_export:
:param _grid_name:
:param _node_name: name used to search in _bb_spine_db_export
:param _unit_name:
:param _time_index: used only for time-variant fuel prices
:param _alternative:
:param _eff_level: default efficiency level for units without effLevelGroupUnit definition,
should be set to the highest level in practice
:param _p_unit: default Fault, True to enable parsing unit specific values from p_unit
:param _node_name_if_output: str, set a new name for the sought node (_node_name) if the unit outputs to it
:param _node_name_if_input: str, set a new name for the sought node (_node_name) if the unit receives its input
:return:
"""
_temp_importer = SpineDBImporter()
# Parameters 1: from BB_gdx p_gnu_io (SpineDB grid__node__unit__io)
_parameters_1 = [
x for x in _bb_spine_db_export['relationship_parameter_values']
if all([x[0] == 'grid__node__unit__io', x[1][:3] == [_grid_name, _node_name, _unit_name]])
]
_fuel_commodity = [
x[1][0] for x in _bb_spine_db_export['relationships'] if x[0] == 'commodity'
]
_unit_capacity = 0
# Parameters 2: from BB_gdx effLevelGroupUnit (SpineDB efflevel__group__unit)
# unit_online_type
_parameters_2 = [
x for x in _bb_spine_db_export['relationships']
if all([x[0] == 'efflevel__group__unit', _unit_name in x[1]])
]
# Parameters 3: from BB_gdx p_unit (SpineDB unit)
_parameters_3 = [
x for x in _bb_spine_db_export['object_parameter_values']
if all([x[0] == 'unit', x[1] == _unit_name])
]
# Translate Parameter 1
# TODO: other bb parameter under the category p_gnu_io, unitSize for investment
for par in _parameters_1:
if par[1][3] == 'output':
if _node_name_if_output:
_node_name = _node_name_if_output
# add unit__to_node relationship for output
_temp_importer.relationships.append(("unit__to_node", (_unit_name, _node_name)))
if par[2] == 'capacity':
# capacity is aggregated in Backbone but SpineOpt requires unit capacity
_number_of_units = __get_number_of_units(_bb_spine_db_export, _unit_name, default=1)
_unit_capacity = par[3] / _number_of_units[0]
_temp_importer.relationship_parameter_values += [
("unit__to_node", [_unit_name, _node_name], "unit_capacity", _unit_capacity, _alternative),
]
elif par[2] == 'conversionCoeff':
_temp_importer.relationship_parameter_values += [
("unit__to_node", [_unit_name, _node_name], "unit_conv_cap_to_flow", par[3], _alternative),
]
elif par[2] == 'vomCosts':
_temp_importer.relationship_parameter_values += [
("unit__to_node", [_unit_name, _node_name], "vom_cost", par[3], _alternative),
]
elif par[1][3] == 'input':
if _node_name_if_input:
_node_name = _node_name_if_input
# add unit__from_node relationship for input
_temp_importer.relationships.append(("unit__from_node", (_unit_name, _node_name)))
# build parameters
if par[2] == 'capacity':
# capacity is aggregated in Backbone but SpineOpt requires unit capacity
_number_of_units = __get_number_of_units(_bb_spine_db_export, _unit_name, default=1)
_unit_capacity = par[3] / _number_of_units[0]
_temp_importer.relationship_parameter_values += [
("unit__from_node", [_unit_name, _node_name], "unit_capacity", _unit_capacity, _alternative),
]
elif par[2] == 'conversionCoeff':
_temp_importer.relationship_parameter_values += [
("unit__from_node", [_unit_name, _node_name], "unit_conv_cap_to_flow", par[3], _alternative),
]
elif par[2] == 'vomCosts':
_temp_importer.relationship_parameter_values += [
("unit__from_node", [_unit_name, _node_name], "vom_cost", par[3], _alternative),
]
# For nodes which are created to supply fuels
if _node_name in _fuel_commodity:
# add an additional dummy unit to balance the input node that is particularly for fuel source
_temp_importer = dummy_unit_for_node(
_temp_importer, _node_name, f"Fueling_unit_{_node_name}", "to_node"
)
# build fuel price, in either TimeSeries or constant value
_fuel_price_dict = __restore_fuel_price_map(_bb_spine_db_export, _node_name, _alternative=_alternative)
if len(_fuel_price_dict) != 1:
__time_index = [str(x) for x in _time_index]
_fuel_price_ts = dict(zip(__time_index, list(_fuel_price_dict.values())[:len(__time_index)]))
_temp_importer.relationship_parameter_values += [
(
"unit__from_node", [_unit_name, _node_name], "fuel_cost",
{"type": "time_series", "data": _fuel_price_ts, "index": {"repeat": timeseries_repeat}},
_alternative
),
]
# constant value
else:
_fuel_price = list(_fuel_price_dict.values())[0]
_temp_importer.relationship_parameter_values += [
("unit__from_node", [_unit_name, _node_name], "fuel_cost", _fuel_price, _alternative),
]
# grid in bb_format translated as commodity in spineopt
# node in bb_format translated as node in spineopt
# unit in bb_format translated as unit in spineopt
_temp_importer.objects += [("commodity", _grid_name), ("node", _node_name), ("unit", _unit_name)]
# add node__commodity relationship
_temp_importer.relationships.append(("node__commodity", (_node_name, _grid_name)))
# Translate Parameters 2
if _parameters_2:
# TODO: level?, lambda eff type?
_unit_on = _parameters_2['level1' in _parameters_2]
if _unit_on[1][1] == 'directOnMIP':
_temp_importer.object_parameter_values.append(
("unit", _unit_name, "online_variable_type", "unit_online_variable_type_integer", _alternative),
)
elif _unit_on[1][1] == 'directOnLP':
_temp_importer.object_parameter_values.append(
("unit", _unit_name, "online_variable_type", "unit_online_variable_type_linear", _alternative),
)
elif _unit_on[1][1] == 'directOff':
_number_of_units = __get_number_of_units(_bb_spine_db_export, _unit_name, default=1)
_temp_importer.object_parameter_values.append(
("unit", _unit_name, "fix_units_on", _number_of_units[0], _alternative),
)
# Translate Parameters 3
_operating_points = [0]
_min_operating_point = _operating_points[0]
# for units with efficiency levels
if _parameters_2:
_eff_level = len(_parameters_2)
_direction = set([x[1][3] for x in _parameters_1])
# TODO: what about the units with _direction containing both input and output?
_constraint_name = f"Eff_{_unit_name}"
if 'output' in _direction and len(_direction) == 1:
if ("unit_constraint", _constraint_name) not in _temp_importer.objects:
_temp_importer.objects += [("unit_constraint", _constraint_name), ]
# specify constraint settings
_temp_importer.object_parameter_values += [
("unit_constraint", _constraint_name, "constraint_sense", "==", _alternative),
("unit_constraint", _constraint_name, "right_hand_side", 0.0, _alternative),
]
_temp_importer.relationships += [
("unit__unit_constraint", (_unit_name, _constraint_name)),
("unit__to_node__unit_constraint", (_unit_name, _node_name, _constraint_name)),
]
# data for units with constant efficiency is stored in the highest effLevel
if all(['directOff' in x[1] for x in _parameters_2]):
_max_level = max(range(0, _eff_level))
_operating_points = [x[3] for x in _parameters_3 if x[2] == f"op{_max_level:02d}"]
# eff = output/input
_unit_flow_coefficient = [-(x[3] ** -1) for x in _parameters_3 if x[2] == f"eff{_max_level:02d}"]
else:
_operating_points = list(
map(lambda i: [x[3] for x in _parameters_3 if x[2] == f"op{i:02d}"][0],
range(0, _eff_level))
)
# TODO: to be confirmed
_min_operating_point = _operating_points[0]
# eff = output/input
_unit_flow_coefficient = list(
map(lambda i: [-(x[3] ** -1) for x in _parameters_3 if x[2] == f"eff{i:02d}"][0],
range(0, _eff_level))
)
_temp_importer.relationship_parameter_values += [
("unit__to_node", (_unit_name, _node_name),
"operating_points", {"type": "array", "value_type": "float", "data": _operating_points}, _alternative),
("unit__to_node", (_unit_name, _node_name),
"minimum_operating_point", _min_operating_point, _alternative),
("unit__to_node__unit_constraint", (_unit_name, _node_name, _constraint_name),
"unit_flow_coefficient", {"type": "array", "value_type": "float", "data": _unit_flow_coefficient},
_alternative)
]
elif 'input' in _direction and len(_direction) == 1:
if ("unit_constraint", _constraint_name) not in _temp_importer.objects:
_temp_importer.objects += [("unit_constraint", _constraint_name), ]
# specify constraint settings
_temp_importer.object_parameter_values += [
("unit_constraint", _constraint_name, "constraint_sense", "==", _alternative),
("unit_constraint", _constraint_name, "right_hand_side", 0.0, _alternative),
]
_temp_importer.relationships += [
("unit__unit_constraint", (_unit_name, _constraint_name)),
("unit__from_node__unit_constraint", (_unit_name, _node_name, _constraint_name)),
]
_unit_flow_coefficient = [
x[3] for x in _parameters_1 if all([x[2] == "conversionCoeff"])
][0]
_temp_importer.relationship_parameter_values += [
("unit__from_node__unit_constraint", (_unit_name, _node_name, _constraint_name),
"unit_flow_coefficient", _unit_flow_coefficient, _alternative),
]
# Whether to parse unit specific values from p_unit
if _p_unit:
# parameters directly translatable
def _rename_unit_para(obj_para_value_entity: tuple, _target_para_name: str, __alternative: str):
_para = list(obj_para_value_entity)
_para[2] = _target_para_name
_para[4] = __alternative
return _para
for par in _parameters_3:
if par[2] == 'availability':
_parameter = _rename_unit_para(par, "unit_availability_factor", _alternative)
_temp_importer.object_parameter_values.append(tuple(_parameter))
elif par[2] == 'minOperationHours':
_parameter = _rename_unit_para(par, "min_up_time", _alternative)
_parameter[3] = {"type": "duration", "data": f"{int(_parameter[3])}h"}
_temp_importer.object_parameter_values.append(tuple(_parameter))
elif par[2] == 'minShutdownHours':
_parameter = _rename_unit_para(par, "min_down_time", _alternative)
_parameter[3] = {"type": "duration", "data": f"{int(_parameter[3])}h"}
_temp_importer.object_parameter_values.append(tuple(_parameter))
# number of units, 1 is default value defined in SpineOpt database template
elif par[2] == 'unitCount':
_parameter = _rename_unit_para(par, "number_of_units", _alternative)
_temp_importer.object_parameter_values.append(tuple(_parameter))
# EUR/start/unit, start per unit capacity
elif par[2] == 'startCostCold':
_start_up_cost = par[3] * _unit_capacity
_temp_importer.object_parameter_values.append(
("unit", _unit_name, "start_up_cost", _start_up_cost, _alternative)
)
elif par[2] == 'startFuelConsCold':
# MWh fuel/unit startup
_start_up_fuel_consumption = - par[3] * _unit_capacity
# Parameters 4: from BB_gdx p_uStartupfuel (SpineDB unit__startupFuel)
_parameters_4 = [
x for x in _bb_spine_db_export['relationship_parameter_values']
if all([x[0] == 'unit__startupFuel', _unit_name in x[1]])
]
if _parameters_4:
# the corresponding fuel node for te startup fuel
_start_up_fuel = _parameters_4[0][1][1]
# explicit startup fuel node for clarity
_startup_fuel_node = f"{_start_up_fuel}_for_unit_startup"
# _startup_fuel_node links to the same fueling unit as the fuel node
if ("node", _startup_fuel_node) not in _temp_importer.objects:
_temp_importer.objects.append(("node", _startup_fuel_node))
_temp_importer = dummy_unit_for_node(
_temp_importer, _startup_fuel_node, f"Fueling_unit_{_start_up_fuel}", "to_node"
)
# add commodity for the fuel node if there is any
# _startup_fuel_node shares the same commodity with the fuel node
_grid_for_fuel_commodity = [
x[1][0] for x in _bb_spine_db_export['relationships']
if all([x[0] == 'grid__node', _start_up_fuel in x[1]])
]
if _grid_for_fuel_commodity:
_fuel_commodity = _grid_for_fuel_commodity[0]
if ("commodity", _fuel_commodity) not in _temp_importer.objects:
_temp_importer.objects.append(("commodity", _fuel_commodity))
_temp_importer.relationships += [
("node__commodity", (_startup_fuel_node, _fuel_commodity)),
]
else:
print(f"The corresponding grid for fuel node {_start_up_fuel} is missing.")
# build unit_constraint for startup fuel flow
_constraint_name = f"Startup_fuel_{_unit_name}"
_temp_importer.objects += [
("unit_constraint", _constraint_name),
]
_temp_importer.relationships += [
("unit__from_node", (_unit_name, _startup_fuel_node)),
("unit__unit_constraint", (_unit_name, _constraint_name)),
("unit__from_node__unit_constraint", (_unit_name, _startup_fuel_node, _constraint_name)),
]
_temp_importer.object_parameter_values += [
("unit_constraint", _constraint_name, "constraint_sense", "==", _alternative),
("unit_constraint", _constraint_name, "right_hand_side", 0.0, _alternative),
]
_temp_importer.relationship_parameter_values += [
("unit__unit_constraint", (_unit_name, _constraint_name),
"units_started_up_coefficient", _start_up_fuel_consumption, _alternative),
("unit__from_node__unit_constraint", (_unit_name, _startup_fuel_node, _constraint_name),
"unit_flow_coefficient", 1.0, _alternative),
]
# build fuel price, in either TimeSeries or constant value, if there is any
_fuel_price_dict = __restore_fuel_price_map(
_bb_spine_db_export, _start_up_fuel, _alternative=_alternative
)
if len(_fuel_price_dict) != 1:
__time_index = [str(x) for x in _time_index]
_fuel_price_ts = dict(
zip(__time_index, list(_fuel_price_dict.values())[:len(__time_index)]))
_temp_importer.relationship_parameter_values += [
("unit__from_node", [_unit_name, _startup_fuel_node], "fuel_cost",
{"type": "time_series", "data": _fuel_price_ts, "index": {"repeat": timeseries_repeat}},
_alternative),
]
# constant value
else:
_fuel_price = list(_fuel_price_dict.values())[0]
_temp_importer.relationship_parameter_values += [
("unit__from_node", [_unit_name, _startup_fuel_node], "fuel_cost", _fuel_price,
_alternative),
]
# set default number_of_units and unit_availability_factor to 1.0 if not explicitly recorded in the database
# else:
# _temp_importer.object_parameter_values += [
# ("unit", _unit_name, "unit_availability_factor", 1.0, _alternative),
# ("unit", _unit_name, "number_of_units", 1.0, _alternative),
# ]
return _temp_importer | 698770193d23c8decb240132c462860cd59ef77b | 5,276 |
def read_from_file(file_path):
"""
Read a file and return a list with all the lines in the file
"""
file_in_list = []
with open(file_path, 'r') as f:
for line in f.readlines():
file_in_list.append(line)
return file_in_list | 5fef3a3f50528c1a9786451666ae7e43be282bf9 | 5,277 |
def count(predicate, iterable):
"""
    Iterate over the iterable, pass each value to the predicate and
    return the number of elements for which the predicate returns a value
    considered True.
    @param predicate: Predicate function.
    @param iterable: Iterable containing the elements to count.
    @return: The number of true elements.
    """
    result = 0
for i in iterable:
if predicate(i):
result += 1
return result | 1a2d9a05203f32a6f1a8349b6e31d14cb1b82b71 | 5,278 |
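# Quick check (added for illustration): count the even numbers in a list.
assert count(lambda x: x % 2 == 0, [1, 2, 3, 4, 5]) == 2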
def get_object_from_path(path):
"""
:param path:
dot seperated path. Assumes last item is the object and first part is module
path(str) -
example:
cls = get_object_from_path("a.module.somewhere.MyClass")
you can create a path like this:
class_path = "{0}.{1}".format(MyClass.__module__, MyClass.__name__)
"""
module_path, _, obj_name = path.rpartition(".")
    module = __import__(module_path, globals(), locals(), [obj_name], 0)
obj = getattr(module, obj_name, None)
return obj | e722b040486288d53fe4a357d81ddec8dfc9820e | 5,279 |
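# Usage sketch (added; resolves a standard-library class so it runs anywhere):
def _example_get_object_from_path():
    ordered_dict_cls = get_object_from_path("collections.OrderedDict")
    assert ordered_dict_cls.__name__ == "OrderedDict"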
def _get_collection_memcache_key(collection_id, version=None):
"""Returns a memcache key for the collection.
Args:
collection_id: str. ID of the collection.
version: int. Schema version of the collection.
Returns:
str. The memcache key of the collection.
"""
if version:
return 'collection-version:%s:%s' % (collection_id, version)
else:
return 'collection:%s' % collection_id | cc054d726d1d2642701803a816e214eed4d9663d | 5,280 |
import numpy as np
def biKmeans(dataSet, k, distMeas=calcEuclideanDistance):
    """
    Bisecting K-means clustering algorithm.
    :param dataSet: data set matrix, one sample per row
    :param k: desired number of clusters
    :param distMeas: distance measure function
    :return: (matrix of centroids, cluster assignment matrix)
    """
m = np.shape(dataSet)[0]
clusterAssment = np.mat(np.zeros((m, 2)))
centroid0 = np.mean(dataSet, axis=0).tolist()[0]
centList = [centroid0] # create a list with one centroid
for j in range(m): # calc initial Error
clusterAssment[j, 1] = distMeas(np.mat(centroid0), dataSet[j, :]) ** 2
while len(centList) < k:
lowestSSE = np.inf
for i in range(len(centList)):
# get the data points currently in cluster i
ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == i)[0], :]
centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
sseSplit = sum(splitClustAss[:, 1]) # compare the SSE to the currrent minimum
sseNotSplit = sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])
print "sseSplit, and notSplit: ", sseSplit, sseNotSplit
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
bestClustAss[np.nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList) # change 1 to 3,4, or whatever
bestClustAss[np.nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit
        print('the bestCentToSplit is: ', bestCentToSplit)
        print('the len of bestClustAss is: ', len(bestClustAss))
centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0] # replace a centroid with two best centroids
centList.append(bestNewCents[1, :].tolist()[0])
# reassign new clusters, and SSE
clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss
return np.mat(centList), clusterAssment | 1421dfa95c44e046bd7d729ad343e98eb83bbbcd | 5,281 |
import glob
import os
import logging
import pandas as pd
import geopandas as gpd
from shapely.geometry import box
def get(directory):
"""Prepare df and gdf with solar atlas tiled data."""
files_list = glob.glob(os.path.join(directory, "*", "*.csv"))
data = []
for file in files_list:
logging.info(file)
tiles = pd.read_csv(file, header=None)
tiles.columns = ["tilename", "minx", "maxx", "miny", "maxy"]
tiles["extentBox"] = tiles.apply(
lambda x: box(x.minx, x.miny, x.maxx, x.maxy), axis=1
)
tiles["tilename"] = (
os.path.basename(os.path.dirname(file)) + "/" + tiles["tilename"]
)
tiles["start_at"] = pd.to_datetime(
"2099-" + os.path.dirname(file).split("_")[1], format="%Y-%m"
)
data.append(tiles)
data = pd.concat(data, ignore_index=True)
enermaps_data = utilities.ENERMAPS_DF
enermaps_data["fid"] = data["tilename"]
enermaps_data["start_at"] = data["start_at"]
enermaps_data["variable"] = VARIABLE
enermaps_data["unit"] = UNIT
enermaps_data["israster"] = ISRASTER
enermaps_data["dt"] = DT
spatial = gpd.GeoDataFrame(geometry=data["extentBox"], crs="EPSG:3035",)
spatial["fid"] = data["tilename"]
return enermaps_data, spatial | 69a32931930ffc5793f84faa09c3aa4b09688b42 | 5,282 |
def cleanup(args, repo):
"""Clean up undeployed pods."""
if args.keep < 0:
raise ValueError('negative keep: %d' % args.keep)
def _is_enabled_or_started(pod):
for instance in pod.iter_instances():
if scripts.systemctl_is_enabled(instance.unit_name):
return True
if scripts.systemctl_is_active(instance.unit_name):
return True
return False
for pod_dir_name in repo.get_pod_dir_names():
LOG.info('%s - cleanup', pod_dir_name)
all_pods = list(repo.iter_pods(pod_dir_name))
num_left = len(all_pods)
for pod in all_pods:
if num_left <= args.keep:
break
if _is_enabled_or_started(pod):
LOG.info('refuse to undeploy pod: %s', pod)
continue
_undeploy_pod(repo, pod)
num_left -= 1
return 0 | b015c1dbfeb3ad50218afaadfe198123ff2ab6df | 5,283 |
from typing import Dict
from tensorflow import keras
def optimizer_builder(
        config: Dict):
"""
Instantiate an optimizer.
:param config:
:return:
"""
# --- argument checking
if not isinstance(config, dict):
raise ValueError("config must be a dictionary")
# --- read configuration
decay_rate = config["decay_rate"]
decay_steps = config["decay_steps"]
learning_rate = config["learning_rate"]
gradient_clipping_by_norm = config["gradient_clipping_by_norm"]
# --- set up schedule
lr_schedule = \
keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=learning_rate,
decay_steps=decay_steps,
decay_rate=decay_rate)
return \
keras.optimizers.RMSprop(
learning_rate=lr_schedule,
global_clipnorm=gradient_clipping_by_norm),\
lr_schedule | 2194408d74d4f03bc54371b98b12c8dbe85fb585 | 5,284 |
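# Usage sketch (added; the configuration values are illustrative assumptions):
def _example_optimizer_builder():
    optimizer, schedule = optimizer_builder({
        "decay_rate": 0.9,
        "decay_steps": 1000,
        "learning_rate": 1e-3,
        "gradient_clipping_by_norm": 1.0,
    })
    return optimizer, schedule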
import numpy as np
def psi(z: float, a: float, b: float) -> float:
"""Penalty function with uniformly bounded derivative (Eq. 20)
Args:
z: Relative distance
a: Cohesion strength
b: Separation strength
"""
c = np.abs(a - b) / (2 * np.sqrt(a * b))
return ((a + b) / 2) * (np.sqrt(1 + (z + c) ** 2) - np.sqrt(1 + c ** 2)) + ((a - b) / 2) * z | df88e57d80a32d95367f30ce52af84308349387a | 5,285 |
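# Sanity check (added for illustration): the penalty vanishes at zero
# relative distance for any positive cohesion/separation strengths.
assert abs(psi(0.0, a=2.0, b=1.0)) < 1e-12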
def caselessSort(alist):
"""Return a sorted copy of a list. If there are only strings
in the list, it will not consider case.
"""
try:
return sorted(alist, key=lambda a: (a.lower(), a))
    except (AttributeError, TypeError):
return sorted(alist) | 7558a57e28255817c71846da84230ced49553bb6 | 5,286 |
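# Quick check (added for illustration): string lists sort case-insensitively,
# with the original string as a tie-breaker.
assert caselessSort(["b", "A", "a"]) == ["A", "a", "b"]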
def EnableRing(serialPort):
""" Enable the ISU to listen for SBD Ring Alerts. When SBD Ring Alert indication is enabled, the 9602 asserts the RI line and issues the unsolicited result
code SBDRING when an SBD Ring Alert is received. """
# Enables Ring message to indicate there's a message to read.
Log("EnableRing()")
if not WriteAndCheck(serialPort, "AT+SBDMTA=1\r", "OK", 30):
Log("Issue enabling ring notifications.")
return False
Log("OK.")
return True | 7036610523802f659c7a69ae192f1009401a6ac3 | 5,287 |
from os.path import basename, dirname, expanduser
from sys import exit
from jinja2 import (Environment, FileSystemLoader, Template, TemplateError,
                    TemplateSyntaxError, Undefined as BaseUndefined)
def render(template, **context):
"""Render the given template.
:param template: The template file name or string to render.
:param **context: Context keyword-arguments.
"""
class Undefined(BaseUndefined):
def _fail_with_undefined_error(self, *args, **kwargs):
try:
super(Undefined, self)._fail_with_undefined_error(*args,
**kwargs)
            except Exception as error:
return "{{{{ {0} }}}}".format(error)
try:
try:
            template_file = open(expanduser(template))
loader = FileSystemLoader(dirname(template_file.name))
environment = Environment(loader=loader, undefined=Undefined)
try:
template = environment.get_template(basename(
template_file.name))
            except TemplateSyntaxError as error:
message = "Template {0}:{1}, {2}{3}".format(error.filename,
error.lineno, error.message[:1].lower(), error.message[1:])
exit(message)
except IOError:
try:
template = Template(template, undefined=Undefined)
            except TemplateSyntaxError as error:
message = "Template \"{0}\" {1}{2}".format(template,
error.message[:1].lower(), error.message[1:])
exit(message)
    except TemplateError as error:
message = "Template error: {0}".format(error.message)
exit(message)
return template.render(context) | 6680f163e1b89424e88b1a3046784083cdbb6520 | 5,288 |
import re
import os
import json
from io import BytesIO, StringIO
from os.path import splitext
from urllib.parse import urlparse
def read(pth, format=None, encoding=None, cols=None, **kwargs):
"""Returns the contents of a file into a string or format-dependent data
type (with special handling for json and csv files).
The format will either be inferred from the file extension or can be set
explicitly using the `format` arg. Text will be read using the specified
`encoding` or default to UTF-8.
JSON files will be parsed and an appropriate python type will be selected
based on the top-level object defined in the file. The optional keyword
argument `dict` can be set to `adict` or `odict` if you'd prefer not to use
the standard python dictionary for decoded objects.
CSV files will return a list of rows. By default each row will be an ordered
list of column values. If the first line of the file defines column names,
you can call read() with cols=True in which case each row will be a namedtuple
using those names as keys. If the file doesn't define its own column names,
you can pass a list of strings as the `cols` parameter. Rows can be formatted
as column-keyed dictionaries by passing True as the `dict` parameter.
"""
if re.match(r'https?:', pth):
resp = HTTP.get(pth)
resp.raise_for_status()
extension_type = splitext(urlparse(pth).path)[-1]
content_type = resp.headers.get('content-type', extension_type).lower()
for data_t in ['json', 'csv']:
if data_t in content_type:
extension_type = data_t
if binaryish(content_type, format):
fd = BytesIO(resp.content)
else:
if encoding:
resp.encoding = encoding
elif 'charset' not in content_type:
resp.encoding = resp.apparent_encoding
fd = StringIO(resp.text)
else:
enc = encoding or 'utf-8'
extension_type = splitext(pth)[-1].lower()
if binaryish(extension_type, format):
fd = open(os.path.expanduser(pth), 'rb')
else:
fd = open(os.path.expanduser(pth), 'rt', encoding=enc)
if kwargs.get('dict') is True:
kwargs['dict'] = dict
elif kwargs.get('dict') is False:
del kwargs['dict']
dict_type = kwargs.get('dict', dict)
format = (format or extension_type).lstrip('.')
if format=='json':
return json.load(fd, object_pairs_hook=dict_type)
elif format=='csv':
dialect = csv_dialect(fd)
if cols:
if kwargs.get('dict'):
return list(csv_dict(fd, dialect=dialect, cols=cols, dict=dict_type))
else:
return list(csv_tuple(fd, dialect=dialect, cols=cols))
return list(csv_rows(fd, dialect=dialect))
else:
return fd.read() | 58f1b53b7ece08bc0da44dbd709e107e0ae46dbf | 5,289 |
def audio(src: str) -> str:
""" Insert audio tag
The tag is currently not supported by Nuance, please use `audio_player` kit:
docs/use_kits_and_actions.md
:param src:
:return:
"""
return f'<audio src="{src}"/>' | f9396d5f82eeca27089de41187fd7d5e967cc9cf | 5,290 |
import os
def read(*rnames):
"""
Read content of a file. We assume the file to be in utf8
"""
return open(os.path.join(os.path.dirname(__file__), *rnames), encoding="utf8", mode="r").read() | 15b1acf39188810080c3c47908b011011f4d35ca | 5,291 |
import math
def PerpendicularDistanceToFinish(point_b_angle: float,
point_b: gps_pb2.Point) -> float:
"""
cos(B) = Adjacent / Hypotenuse
https://www.mathsisfun.com/algebra/trig-finding-side-right-triangle.html
"""
return math.cos(math.radians(point_b_angle)) * point_b.start_finish_distance | 3c18c323c625893ab474c48eb00d48da543956ba | 5,292 |
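# Worked example (added; uses a lightweight stand-in because gps_pb2.Point is
# assumed to come from the surrounding module and is not defined here):
def _example_perpendicular_distance():
    from types import SimpleNamespace
    point_b = SimpleNamespace(start_finish_distance=100.0)
    # cos(60 degrees) = 0.5, so the perpendicular distance is 50.
    assert abs(PerpendicularDistanceToFinish(60.0, point_b) - 50.0) < 1e-9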
import requests
import pandas as pd
from typing import List
def get_revolut_stocks() -> List[str]:
"""
Gets all tickers offered on Revolut trading platform.
Returns:
list(str)
"""
req = requests.get("https://globefunder.com/revolut-stocks-list/")
tickers = list(pd.read_html(req.content)[0]["Symbol"])
tickers = [ticker.replace(".", "-") for ticker in tickers]
return tickers | 3e7f41a04c653a954609cee618cbf89d962fef1d | 5,293 |
from functools import wraps
def response_text(response_class):
"""
Return the UTF-8 encoding of the API response.
:param response_class: class to cast the response to
:return: Text of the response casted to the specified class
"""
def _inner(f):
@wraps(f)
def wrapper(obj, *args, **kwargs):
result = f(obj, *args, **kwargs)
if isinstance(result, response_class):
return result
try:
return response_class(result.text)
except Exception:
logger.debug("Exception during response parsing.", exc_info=True)
raise APIError("Exception during response parsing")
return wrapper
return _inner | 43f0d4cde4790128073440172ed30850794de7a9 | 5,294 |
from typing import Tuple
from numpy import c_, ndarray, unique
def create_rankings(
a: Dataset, b: Dataset, n_samples: int = 100, unravel: bool = False, **kwargs: int
) -> Tuple[ndarray, ndarray]:
"""
Sample a dataset 'a' with 'n' negative samples given interactions in dataset 'a'
and 'b'.
Practically, this function allows you to generate evaluation data as described in
the work of He et al. [1]. The evaluation procedure assumes that the input datasets
'a' and 'b' have been generated with a leave 'n' out policy, such that dataset 'b'
corresponds to the 'training' dataset (i.e. dataset with 'left out' samples removed)
and 'a' corresponds to the 'test' dataset with 'n' for each user with
n_interactions > n. For each user in 'a', the function will return that user's 'n'
left-out interactions, plus 'n_samples' negative samples (items the user has not
interacted with in both the 'train' and 'test' datasets).
Parameters
----------
a: Dataset
The 'test' dataset (the dataset you wish to use for evaluation).
b: Dataset
The 'train' dataset (the dataset you wish to include for purposes of sampling
items the user has not interacted with -- negative samples).
n_samples: int
The total number of negative samples per user to generate. For example, if the
dataset 'a' was generated from a leave-one-out split, and n_samples=100, that
user would receive 101 samples.
unravel: bool
If 'True', the function will return two arrays, where the first element of the
first array corresponds to the user _vector_ (i.e. user ID + optional metadata),
the first element of the first array corresponds to an associated sampled item
vector(i.e. item ID + optional metadata).
Returns
-------
output: (ndarray, List[ndarray])
If 'unravel=False', the first element corresponds to an array of _ordered_ user
ids, the second the `n_samples+1`per-user samples.
If `unravel=True`, the first element corresponds to an array of _ordered_ user
vectors, the second to each individual item vector. See `unravel` argument and
`_unravel_ranked`, below. This function is provided for use when evaluating
Keras Models with the `predict` method.
References
----------
[1] He et al. https://dl.acm.org/doi/10.1145/3038912.3052569
"""
users, items, _ = a.to_components(
negative_samples=n_samples,
aux_matrix=b.interactions.tocsr(),
shuffle=False,
sampling_mode="absolute",
)
unique_users = unique(users)
sampled_users, sampled_items = (
users[len(unique_users) :],
items[len(unique_users) :],
)
_, grouped = groupby(sampled_users, sampled_items)
grouped = c_[grouped, items[: len(unique_users)]]
if unravel:
return _unravel_sampled(unique_users, grouped, a, **kwargs)
else:
return unique_users, grouped | 28282fc14d02b7f93d58d209d143a315e7b25422 | 5,295 |
def make_even(x):
"""Make number divisible by 2"""
if x % 2 != 0:
x -= 1
return x | 10129eb6abd718414d0ada53915672dcf4d7b5b6 | 5,296 |
def get_num_vehicles(session, query_filters):
"""Gets the total number of annotations."""
# pylint: disable-msg=E1101
num_vehicles_query = session.query(
func.count(Vehicle.id)) \
.join(Photo) \
        .filter(Photo.test == True)
# pylint: enable-msg=E1101
for query_filter in query_filters:
num_vehicles_query = num_vehicles_query.filter(query_filter)
num_vehicles, = num_vehicles_query.one()
return num_vehicles | bf626edad29b136bb595dabb7e878649c08c0d84 | 5,297 |
def task_status_edit(request, status_id, response_format='html'):
"""TaskStatus edit"""
status = get_object_or_404(TaskStatus, pk=status_id)
if not request.user.profile.has_permission(status, mode='w'):
return user_denied(request, message="You don't have access to this Task Status")
if request.POST:
if 'cancel' not in request.POST:
form = TaskStatusForm(
request.user.profile, request.POST, instance=status)
if form.is_valid():
status = form.save()
return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
else:
return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
else:
form = TaskStatusForm(request.user.profile, instance=status)
context = _get_default_context(request)
context.update({'form': form,
'status': status})
return render_to_response('projects/status_edit', context,
context_instance=RequestContext(request), response_format=response_format) | 593384ab55bf889a1e87d7909e848a2dbacad68e | 5,298 |
import platform
def is_windows_system():
"""
    | ##@Function purpose: determine whether the system is Windows
    | ##@Parameter description: True or False
    | ##@Return value:
    | ##@Function logic:
    | ##@Developer: jhuang
    | ##@Date:
"""
return 'Windows' in platform.system() | 6bfe296188b9dccf8338f0b2bbaaf146d9b22243 | 5,299 |