filename | text
---|---
the-stack_106_28145 | import torch, os
import numpy as np
from torch import optim
from torch.autograd import Variable
from MiniImagenet import MiniImagenet
from naive5 import Naive5
import scipy.stats
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import random, sys, pickle
import argparse
from torch import nn
global_train_acc_buff = 0
global_train_loss_buff = 0
global_test_acc_buff = 0
global_test_loss_buff = 0
global_buff = []
def write2file(n_way, k_shot):
global_buff.append([global_train_loss_buff, global_train_acc_buff, global_test_loss_buff, global_test_acc_buff])
with open("mini%d%d.pkl" % (n_way, k_shot), "wb") as fp:
pickle.dump(global_buff, fp)
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
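# Illustrative use (hypothetical numbers): given accs = np.array of 600 per-episode accuracies,
# mean_confidence_interval(accs) returns (mean, half-width h), typically reported as "mean +- h"
# at 95% confidence.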
# save best acc info, to save the best model to ckpt.
best_accuracy = 0
def evaluation(net, batchsz, n_way, k_shot, imgsz, episodesz, threhold, mdl_file):
"""
Following the experiment setting of MAML and Learning to Compare, we randomly sample 600 episodes with 15 query
images per class in the query set.
:param net:
:param batchsz:
:return:
"""
k_query = 15
mini_val = MiniImagenet('../mini-imagenet/', mode='test', n_way=n_way, k_shot=k_shot, k_query=k_query,
batchsz=600, resize=imgsz)
db_val = DataLoader(mini_val, batchsz, shuffle=True, num_workers=2, pin_memory=True)
accs = []
episode_num = 0 # record tested num of episodes
for batch_test in db_val:
# [60, setsz, c_, h, w]
# setsz = (5 + 15) * 5
support_x = Variable(batch_test[0]).cuda()
support_y = Variable(batch_test[1]).cuda()
query_x = Variable(batch_test[2]).cuda()
query_y = Variable(batch_test[3]).cuda()
# we will split query set into 15 splits.
# query_x : [batch, 15*way, c_, h, w]
# query_x_b : tuple, 15 * [b, way, c_, h, w]
query_x_b = torch.chunk(query_x, k_query, dim=1)
# query_y : [batch, 15*way]
# query_y_b: 15* [b, way]
query_y_b = torch.chunk(query_y, k_query, dim=1)
preds = []
net.eval()
# we don't only need the total acc over 600 episodes; we also track the acc per set of 15*n_way query samples.
total_correct = 0
total_num = 0
total_loss = 0
for query_x_mini, query_y_mini in zip(query_x_b, query_y_b):
# print('query_x_mini', query_x_mini.size(), 'query_y_mini', query_y_mini.size())
loss, pred, correct = net(support_x, support_y, query_x_mini.contiguous(), query_y_mini, False)
correct = correct.sum() # multi-gpu
# pred: [b, nway]
preds.append(pred)
total_correct += correct.data[0]
total_num += query_y_mini.size(0) * query_y_mini.size(1)
total_loss += loss.data[0]
# # 15 * [b, nway] => [b, 15*nway]
# preds = torch.cat(preds, dim= 1)
acc = total_correct / total_num
print('%.5f,' % acc, end=' ')
sys.stdout.flush()
accs.append(acc)
# update tested episode number
episode_num += query_y.size(0)
if episode_num > episodesz:
# test current tested episodes acc.
acc = np.array(accs).mean()
if acc >= threhold:
# if current acc is very high, we conduct all 600 episodes testing.
continue
else:
# current acc is low, just conduct `episodesz` num of episodes.
break
# compute the distribution of 600/episodesz episodes acc.
global best_accuracy
accs = np.array(accs)
accuracy, sem = mean_confidence_interval(accs)
print('\naccuracy:', accuracy, 'sem:', sem)
print('<<<<<<<<< accuracy:', accuracy, 'best accuracy:', best_accuracy, '>>>>>>>>')
if accuracy > best_accuracy:
best_accuracy = accuracy
torch.save(net.state_dict(), mdl_file)
print('Saved to checkpoint:', mdl_file)
# we only take the last one batch as avg_loss
total_loss = total_loss / n_way / k_query
global global_test_loss_buff, global_test_acc_buff
global_test_loss_buff = total_loss
global_test_acc_buff = accuracy
write2file(n_way, k_shot)
return accuracy, sem
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', help='n way')
argparser.add_argument('-k', help='k shot')
argparser.add_argument('-b', help='batch size')
argparser.add_argument('-l', help='learning rate', default=1e-3)
args = argparser.parse_args()
n_way = int(args.n)
k_shot = int(args.k)
batchsz = int(args.b)
lr = float(args.l)
k_query = 1
imgsz = 224
threhold = 0.699 if k_shot == 5 else 0.584 # threshold for when to test full version of episode
mdl_file = 'ckpt/naive5_3x3%d%d.mdl' % (n_way, k_shot)
print('mini-imagenet: %d-way %d-shot lr:%f, threshold:%f' % (n_way, k_shot, lr, threhold))
global global_buff
if os.path.exists('mini%d%d.pkl' % (n_way, k_shot)):
global_buff = pickle.load(open('mini%d%d.pkl' % (n_way, k_shot), 'rb'))
print('load pkl buff:', len(global_buff))
net = nn.DataParallel(Naive5(n_way, k_shot, imgsz), device_ids=[0, 1, 2]).cuda()
print(net)
if os.path.exists(mdl_file):
print('load from checkpoint ...', mdl_file)
net.load_state_dict(torch.load(mdl_file))
else:
print('training from scratch.')
# whole parameters number
model_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print('Total params:', params)
# build optimizer and lr scheduler
optimizer = optim.Adam(net.parameters(), lr=lr)
# optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, nesterov=True)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.5, patience=25, verbose=True)
for epoch in range(1000):
mini = MiniImagenet('../mini-imagenet/', mode='train', n_way=n_way, k_shot=k_shot, k_query=k_query,
batchsz=10000, resize=imgsz)
db = DataLoader(mini, batchsz, shuffle=True, num_workers=8, pin_memory=True)
total_train_loss = 0
total_train_correct = 0
total_train_num = 0
for step, batch in enumerate(db):
# 1. test
if step % 300 == 0:
# evaluation(net, batchsz, n_way, k_shot, imgsz, episodesz, threhold, mdl_file):
accuracy, sem = evaluation(net, batchsz, n_way, k_shot, imgsz, 600, threhold, mdl_file)
scheduler.step(accuracy)
# 2. train
support_x = Variable(batch[0]).cuda()
support_y = Variable(batch[1]).cuda()
query_x = Variable(batch[2]).cuda()
query_y = Variable(batch[3]).cuda()
net.train()
loss, pred, correct = net(support_x, support_y, query_x, query_y)
loss = loss.sum() / support_x.size(0) # multi-gpu, divide by total batchsz
total_train_loss += loss.data[0]
total_train_correct += correct.data[0]
total_train_num += support_y.size(0) * n_way # k_query = 1
optimizer.zero_grad()
loss.backward()
optimizer.step()
# 3. print
if step % 20 == 0 and step != 0:
acc = total_train_correct / total_train_num
total_train_correct = 0
total_train_num = 0
print('%d-way %d-shot %d batch> epoch:%d step:%d, loss:%.4f, train acc:%.4f' % (
n_way, k_shot, batchsz, epoch, step, total_train_loss, acc))
total_train_loss = 0
global global_train_loss_buff, global_train_acc_buff
global_train_loss_buff = loss.data[0] / (n_way * k_shot)
global_train_acc_buff = acc
write2file(n_way, k_shot)
if __name__ == '__main__':
main()
|
the-stack_106_28147 | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=9
prog.y(input_qubit[1]) # number=10
prog.y(input_qubit[1]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
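# The grid search above is intended to pick the (gamma, beta) pair maximizing the analytic
# expectation F1; with step_size = 0.1 the grid spans roughly 32 x 32 candidate pairs over [0, pi).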
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_noisy391.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_106_28148 | # SVM Regression
#----------------------------------
#
# This function shows how to use TensorFlow to
# solve support vector regression. We are going
# to find the line that has the maximum margin
# which INCLUDES as many points as possible
#
# We will use the iris data, specifically:
# y = Sepal Length
# x = Petal Width
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])
# Split data into train/test sets
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Declare batch size
batch_size = 50
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss function
# = max(0, abs(target - predicted) - epsilon)
# 1/2 margin width parameter = epsilon
epsilon = tf.constant([0.5])
# Margin term in loss
loss = tf.reduce_mean(tf.maximum(0., tf.subtract(tf.abs(tf.subtract(model_output, y_target)), epsilon)))
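# Worked example (illustrative): with epsilon = 0.5, a residual |target - predicted| of 0.3 falls
# inside the tube and contributes 0 to the loss, while a residual of 0.8 contributes 0.8 - 0.5 = 0.3.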
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.075)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Training loop
train_loss = []
test_loss = []
for i in range(200):
rand_index = np.random.choice(len(x_vals_train), size=batch_size)
rand_x = np.transpose([x_vals_train[rand_index]])
rand_y = np.transpose([y_vals_train[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_train_loss = sess.run(loss, feed_dict={x_data: np.transpose([x_vals_train]), y_target: np.transpose([y_vals_train])})
train_loss.append(temp_train_loss)
temp_test_loss = sess.run(loss, feed_dict={x_data: np.transpose([x_vals_test]), y_target: np.transpose([y_vals_test])})
test_loss.append(temp_test_loss)
if (i+1)%50==0:
print('-----------')
print('Generation: ' + str(i+1))
print('A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
print('Train Loss = ' + str(temp_train_loss))
print('Test Loss = ' + str(temp_test_loss))
# Extract Coefficients
[[slope]] = sess.run(A)
[[y_intercept]] = sess.run(b)
width = sess.run(epsilon)
# Get best fit line
best_fit = []
best_fit_upper = []
best_fit_lower = []
for i in x_vals:
best_fit.append(slope*i+y_intercept)
best_fit_upper.append(slope*i+y_intercept+width)
best_fit_lower.append(slope*i+y_intercept-width)
# Plot fit with data
plt.plot(x_vals, y_vals, 'o', label='Data Points')
plt.plot(x_vals, best_fit, 'r-', label='SVM Regression Line', linewidth=3)
plt.plot(x_vals, best_fit_upper, 'r--', linewidth=2)
plt.plot(x_vals, best_fit_lower, 'r--', linewidth=2)
plt.ylim([0, 10])
plt.legend(loc='lower right')
plt.title('Sepal Length vs Petal Width')
plt.xlabel('Petal Width')
plt.ylabel('Sepal Length')
plt.show()
# Plot loss over time
plt.plot(train_loss, 'k-', label='Train Set Loss')
plt.plot(test_loss, 'r--', label='Test Set Loss')
plt.title('Epsilon-Insensitive Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
|
the-stack_106_28149 | import collections as _collections
import os as _os
import uuid as _uuid
import six as _six
from flytekit.common import sdk_bases as _sdk_bases
from flytekit.common import utils as _utils
from flytekit.common.exceptions import scopes as _exception_scopes
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.types import base_sdk_types as _base_sdk_types
from flytekit.common.types import helpers as _helpers
from flytekit.common.types import primitives as _primitives
from flytekit.common.types.impl import blobs as _blob_impl
from flytekit.configuration import sdk as _sdk_config
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.models import literals as _literal_models
from flytekit.models import types as _type_models
from flytekit.plugins import numpy as _np
from flytekit.plugins import pandas as _pd
# Note: For now, this is only for basic type-checking. We need not differentiate between TINYINT, BIGINT,
# and INT or DOUBLE and FLOAT, VARCHAR and STRING, etc. as we will unpack into appropriate Python
# objects anyway. If we work on managed tables, these more specific type specifications might become necessary.
_SUPPORTED_LITERAL_TYPE_TO_PANDAS_TYPES = None
def get_supported_literal_types_to_pandas_types():
global _SUPPORTED_LITERAL_TYPE_TO_PANDAS_TYPES
if _SUPPORTED_LITERAL_TYPE_TO_PANDAS_TYPES is None:
_SUPPORTED_LITERAL_TYPE_TO_PANDAS_TYPES = {
_primitives.Integer.to_flyte_literal_type(): {_np.int32, _np.int64, _np.uint32, _np.uint64},
_primitives.Float.to_flyte_literal_type(): {_np.float32, _np.float64},
_primitives.Boolean.to_flyte_literal_type(): {_np.bool},
_primitives.Datetime.to_flyte_literal_type(): {_np.datetime64},
_primitives.Timedelta.to_flyte_literal_type(): {_np.timedelta64},
_primitives.String.to_flyte_literal_type(): {_np.object_, _np.str_, _np.string_},
}
return _SUPPORTED_LITERAL_TYPE_TO_PANDAS_TYPES
_ALLOWED_PARTITION_TYPES = {str, int}
# Hive currently has limitations where column headers are not stored when writing to an overwrite directory. There is
# an open proposal (https://issues.apache.org/jira/browse/HIVE-12860) to improve this. Until then, we have this
# work-around where we create an external table with the appropriate schema and write the data to our desired
# location. The issue here is that the table information in the meta-store might not get cleaned up during a partial
# failure.
_HIVE_QUERY_FORMATTER = """
{stage_query_str}
CREATE TEMPORARY TABLE {table}_tmp AS {query_str};
CREATE EXTERNAL TABLE {table} LIKE {table}_tmp STORED AS PARQUET;
ALTER TABLE {table} SET LOCATION '{url}';
INSERT OVERWRITE TABLE {table}
SELECT
{columnar_query}
FROM {table}_tmp;
DROP TABLE {table};
"""
# Once https://issues.apache.org/jira/browse/HIVE-12860 is resolved. We will prefer the following syntax because it
# guarantees cleanup on partial failures.
_HIVE_QUERY_FORMATTER_V2 = """
CREATE TEMPORARY TABLE {table} AS {query_str};
INSERT OVERWRITE DIRECTORY '{url}' STORED AS PARQUET
SELECT {columnar_query}
FROM {table};
"""
# Set location in both parts of this query so in case of a partial failure, we will always have some data backing a
# partition.
_WRITE_HIVE_PARTITION_QUERY_FORMATTER = """
ALTER TABLE {write_table} ADD IF NOT EXISTS {partition_string} LOCATION '{url}';
ALTER TABLE {write_table} {partition_string} SET LOCATION '{url}';
"""
def _format_insert_partition_query(table_name, partition_string, remote_location):
table_pieces = table_name.split(".")
if len(table_pieces) > 1:
# Hive shell commands don't allow us to alter tables and select databases in the table specification. So
# we split the table name and use the 'use' command to choose the correct database.
prefix = "use {};\n".format(table_pieces[0])
table_name = ".".join(table_pieces[1:])
else:
prefix = ""
return prefix + _WRITE_HIVE_PARTITION_QUERY_FORMATTER.format(
write_table=table_name, partition_string=partition_string, url=remote_location
)
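# Illustrative output (hypothetical names): calling
#   _format_insert_partition_query("mydb.my_table", "PARTITION (ds = '2021-01-01')", "s3://bucket/prefix")
# yields roughly:
#   use mydb;
#   ALTER TABLE my_table ADD IF NOT EXISTS PARTITION (ds = '2021-01-01') LOCATION 's3://bucket/prefix';
#   ALTER TABLE my_table PARTITION (ds = '2021-01-01') SET LOCATION 's3://bucket/prefix';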
class _SchemaIO(object):
def __init__(self, schema_instance, local_dir, mode):
"""
:param Schema schema_instance:
:param flytekit.common.utils.Directory local_dir:
:param Text mode:
"""
self._schema = schema_instance
self._local_dir = local_dir
self._chunks = []
self._index = 0
self._mode = mode
def _access_guard(self):
if not self._schema:
raise _user_exceptions.FlyteAssertion(
"Schema IO object has already been closed. Cannot access chunk_count property."
)
@_exception_scopes.system_entry_point
def iter_chunks(self, *args, **kwargs):
raise _user_exceptions.FlyteAssertion("{} is write only.".format(self._schema))
@_exception_scopes.system_entry_point
def read(self, *args, **kwargs):
raise _user_exceptions.FlyteAssertion("{} is write only.".format(self._schema))
@_exception_scopes.system_entry_point
def write(self, *args, **kwargs):
raise _user_exceptions.FlyteAssertion("{} is read only.".format(self._schema))
@_exception_scopes.system_entry_point
def close(self):
self._schema = None
self._local_dir = None
self._chunks = None
self._index = 0
@property
@_exception_scopes.system_entry_point
def chunk_count(self):
self._access_guard()
return len(self._chunks)
@_exception_scopes.system_entry_point
def seek(self, index):
self._access_guard()
if index < 0 or index > self.chunk_count:
raise _user_exceptions.FlyteValueException(
index,
"Attempting to seek to a chunk that is out of range. Allowed range is [0, {}]".format(self.chunk_count),
)
self._index = index
@_exception_scopes.system_entry_point
def tell(self):
return self._index
def __repr__(self):
return "{mode} IO Object for {type} @ {location}".format(
type=self._schema.type, location=self._schema.remote_prefix, mode=self._mode
)
class _SchemaReader(_SchemaIO):
def __init__(self, schema_instance, local_dir):
"""
:param Schema schema_instance:
:param flytekit.common.utils.Directory local_dir:
"""
super(_SchemaReader, self).__init__(schema_instance, local_dir, "Read-Only")
self.reset_chunks()
@_exception_scopes.system_entry_point
def reset_chunks(self):
self._chunks = sorted(self._local_dir.list_dir())
@_exception_scopes.system_entry_point
def iter_chunks(self, columns=None, **kwargs):
self._access_guard()
while self._index < len(self._chunks):
chunk = self.read(columns=columns, concat=False, **kwargs)
if chunk is not None:
yield chunk
@staticmethod
def _read_parquet_with_type_promotion_override(chunk, columns, parquet_engine):
"""
This wrapper function of pd.read_parquet() is a hack intended to fix the type promotion problem
when using fastparquet as the underlying parquet engine.
When using fastparquet, boolean columns containing None values will be promoted to float16 columns.
This becomes problematic when users want to write the dataframe back into parquet
file because float16 (halffloat) is not a supported type in parquet spec. In this function, we detect
such columns and do override the type promotion.
"""
df = None
if parquet_engine == "fastparquet":
import fastparquet.thrift_structures as _ts
from fastparquet import ParquetFile as _ParquetFile
# https://github.com/dask/fastparquet/issues/414#issuecomment-478983811
df = _pd.read_parquet(chunk, columns=columns, engine=parquet_engine, index=False)
df_column_types = df.dtypes
pf = _ParquetFile(chunk)
schema_column_dtypes = {l.name: l.type for l in list(pf.schema.schema_elements)}
for idx in df_column_types[df_column_types == "float16"].index.tolist():
# A hacky way to get the string representations of the column types of a parquet schema
# Reference:
# https://github.com/dask/fastparquet/blob/f4ecc67f50e7bf98b2d0099c9589c615ea4b06aa/fastparquet/schema.py
if _ts.parquet_thrift.Type._VALUES_TO_NAMES[schema_column_dtypes[idx]] == "BOOLEAN":
df[idx] = df[idx].astype("object")
df[idx].replace({0: False, 1: True, _pd.np.nan: None}, inplace=True)
else:
df = _pd.read_parquet(chunk, columns=columns, engine=parquet_engine)
return df
@_exception_scopes.system_entry_point
def read(self, columns=None, concat=False, truncate_extra_columns=True, **kwargs):
"""
When this function is called, one chunk will be read and received as a Pandas data frame. Once all chunks
have been read, this function will return None.
:param list[Text] columns: A list of columns to read. They must be a subset of the columns
defined for the Schema object. If specified, truncate_extra_columns must be True.
:param bool concat: If true, the entire object will be returned in one large data frame.
:param bool truncate_extra_columns: If true, only columns from the underlying parquet file will be read if
they are specified as columns in the schema object (except for empty schemas which will read all columns
regardless). If false, if there are additional columns in the underlying parquet file, they will also be
read.
:rtype: pandas.DataFrame
"""
if columns is not None and truncate_extra_columns is False:
raise _user_exceptions.FlyteAssertion(
"When reading a schema object, it is not possible to both specify a set of columns to read and "
"additionally not truncate_extra_columns. Either columns must not be specified or "
"truncate_extra_columns must be set to True (or not specified)."
)
self._access_guard()
parquet_engine = _sdk_config.PARQUET_ENGINE.get()
if parquet_engine not in {"fastparquet", "pyarrow"}:
raise _user_exceptions.FlyteAssertion(
"environment variable parquet_engine must be one of 'pyarrow', 'fastparquet', or be unset"
)
df_out = None
if not columns:
columns = list(self._schema.type.sdk_columns.keys())
if len(columns) == 0 or truncate_extra_columns is False:
columns = None
if concat:
frames = [
# A hacky hack
# TODO: follow up the issue opened in the fastparquet repo for a more general fix
# issue URL:
_SchemaReader._read_parquet_with_type_promotion_override(
chunk=chunk, columns=columns, parquet_engine=parquet_engine
)
# _pd.read_parquet(chunk, columns=columns, engine=parquet_engine)
for chunk in self._chunks[self._index :]
if _os.path.getsize(chunk) > 0
]
if len(frames) == 1:
df_out = frames[0]
elif len(frames) > 1:
df_out = _pd.concat(frames, copy=True)
self._index = len(self._chunks)
else:
while self._index < len(self._chunks) and df_out is None:
# Skip empty chunks so the user appears to have a continuous stream of data.
if _os.path.getsize(self._chunks[self._index]) > 0:
df_out = _SchemaReader._read_parquet_with_type_promotion_override(
chunk=self._chunks[self._index], columns=columns, parquet_engine=parquet_engine, **kwargs
)
self._index += 1
if df_out is not None:
self._schema.compare_dataframe_to_schema(df_out, read=True, column_subset=columns)
# Make sure the columns are renamed to exactly what the user specifies. This prevents unexpected
# unicode v. string mismatches. Also, if a schema is mapped with strict_names=False, the input might
# have totally different names.
user_columns = columns or _six.iterkeys(self._schema.type.sdk_columns)
# User-specified columns may or may not be unicode
# Since, in python 2, dictionary does a transparent translation between unicode and str for the key,
# (https://stackoverflow.com/a/24532329)
# we use this characteristic to create a trivial lookup dictionary, to make sure we can use either
# unicode or str to lookup, but get back whatever type the user actually used
user_column_dict = {c: c for c in user_columns}
if len(self._schema.type.columns) > 0:
# Avoid using pandas.DataFrame.rename() as this function incurs significant memory overhead
df_out.columns = [
user_column_dict[col] if col in user_columns else col for col in df_out.columns.values
]
return df_out
class _SchemaWriter(_SchemaIO):
def __init__(self, schema_instance, local_dir):
"""
:param Schema schema_instance:
:param flytekit.common.utils.Directory local_dir:
:param Text mode:
"""
super(_SchemaWriter, self).__init__(schema_instance, local_dir, "Write-Only")
@_exception_scopes.system_entry_point
def close(self):
"""
Closes the writing IO context and uploads data to s3.
"""
try:
# TODO: Introduce system logging
# logging.info("Copying recursively {} -> {}".format(self._local_dir.name, self._schema.remote_prefix))
_data_proxy.Data.put_data(self._local_dir.name, self._schema.remote_prefix, is_multipart=True)
finally:
super(_SchemaWriter, self).close()
@_exception_scopes.system_entry_point
def write(self, data_frame, coerce_timestamps="us", allow_truncated_timestamps=False):
"""
Writes data frame as a chunk to the local directory owned by the Schema object. Will later be uploaded to s3.
:param pandas.DataFrame data_frame: data frame to write as parquet
:param Text coerce_timestamps: format to store timestamp in parquet. 'us', 'ms', 's' are allowed values.
Note: if your timestamps will lose data due to the coercion, your write will fail! Nanoseconds are
problematic in the Parquet format and will not work. See allow_truncated_timestamps.
:param bool allow_truncated_timestamps: default False. Allow truncation when coercing timestamps to a coarser
resolution.
"""
self._access_guard()
if not isinstance(data_frame, _pd.DataFrame):
raise _user_exceptions.FlyteTypeException(
expected_type=_pd.DataFrame,
received_type=type(data_frame),
received_value=data_frame,
additional_msg="Only pandas DataFrame objects can be written to a Schema object",
)
self._schema.compare_dataframe_to_schema(data_frame)
all_columns = list(data_frame.columns.values)
# Convert all columns to unicode as pyarrow's parquet reader can not handle mixed strings and unicode.
# Since columns from Hive are returned as unicode, if a user wants to add a column to a dataframe returned from
# Hive, then output the new data, the user would have to provide a unicode column name which is unnatural.
unicode_columns = [_six.text_type(col) for col in all_columns]
data_frame.columns = unicode_columns
try:
filename = self._local_dir.get_named_tempfile(_os.path.join(str(self._index).zfill(6)))
data_frame.to_parquet(
filename,
coerce_timestamps=coerce_timestamps,
allow_truncated_timestamps=allow_truncated_timestamps,
)
if self._index == len(self._chunks):
self._chunks.append(filename)
self._index += 1
finally:
# Return to old names to prevent odd behavior with user.
data_frame.columns = all_columns
class _SchemaBackingMpBlob(_blob_impl.MultiPartBlob):
@property
def directory(self):
"""
:rtype: flytekit.common.utils.Directory
"""
return self._directory
def __enter__(self):
if not self.local_path:
if _data_proxy.LocalWorkingDirectoryContext.get() is None:
raise _user_exceptions.FlyteAssertion(
"No temporary file system is present. Either call this method from within the "
"context of a task or surround with a 'with LocalTestFileSystem():' block. Or "
"specify a path when calling this function."
)
self._directory = _utils.AutoDeletingTempDir(
_uuid.uuid4().hex,
tmp_dir=_data_proxy.LocalWorkingDirectoryContext.get().name,
)
self._is_managed = True
self._directory.__enter__()
if "r" in self.mode:
_data_proxy.Data.get_data(self.remote_location, self.local_path, is_multipart=True)
def __exit__(self, exc_type, exc_val, exc_tb):
if "w" in self.mode:
_data_proxy.Data.put_data(self.local_path, self.remote_location, is_multipart=True)
return super(_SchemaBackingMpBlob, self).__exit__(exc_type, exc_val, exc_tb)
class SchemaType(_type_models.SchemaType, metaclass=_sdk_bases.ExtendedSdkType):
_LITERAL_TYPE_TO_PROTO_ENUM = {
_primitives.Integer.to_flyte_literal_type(): _type_models.SchemaType.SchemaColumn.SchemaColumnType.INTEGER,
_primitives.Float.to_flyte_literal_type(): _type_models.SchemaType.SchemaColumn.SchemaColumnType.FLOAT,
_primitives.Boolean.to_flyte_literal_type(): _type_models.SchemaType.SchemaColumn.SchemaColumnType.BOOLEAN,
_primitives.Datetime.to_flyte_literal_type(): _type_models.SchemaType.SchemaColumn.SchemaColumnType.DATETIME,
_primitives.Timedelta.to_flyte_literal_type(): _type_models.SchemaType.SchemaColumn.SchemaColumnType.DURATION,
_primitives.String.to_flyte_literal_type(): _type_models.SchemaType.SchemaColumn.SchemaColumnType.STRING,
}
def __init__(self, columns=None):
super(SchemaType, self).__init__(None)
self._set_columns(columns or [])
@property
def sdk_columns(self):
"""
This is an ordered dictionary so iterating over it will be in the order columns were specified in the
constructor.
:rtype: dict[Text, flytekit.common.types.base_sdk_types.FlyteSdkType]
"""
return self._sdk_columns
@property
def columns(self):
"""
:rtype: list[flytekit.models.types.SchemaType.SchemaColumn]
"""
return [
_type_models.SchemaType.SchemaColumn(n, type(self)._LITERAL_TYPE_TO_PROTO_ENUM[v.to_flyte_literal_type()])
for n, v in _six.iteritems(self.sdk_columns)
]
@classmethod
def promote_from_model(cls, model):
"""
:param flytekit.models.types.SchemaType model:
:rtype: SchemaType
"""
_PROTO_ENUM_TO_SDK_TYPE = {
_type_models.SchemaType.SchemaColumn.SchemaColumnType.INTEGER: _helpers.get_sdk_type_from_literal_type(
_primitives.Integer.to_flyte_literal_type()
),
_type_models.SchemaType.SchemaColumn.SchemaColumnType.FLOAT: _helpers.get_sdk_type_from_literal_type(
_primitives.Float.to_flyte_literal_type()
),
_type_models.SchemaType.SchemaColumn.SchemaColumnType.BOOLEAN: _helpers.get_sdk_type_from_literal_type(
_primitives.Boolean.to_flyte_literal_type()
),
_type_models.SchemaType.SchemaColumn.SchemaColumnType.DATETIME: _helpers.get_sdk_type_from_literal_type(
_primitives.Datetime.to_flyte_literal_type()
),
_type_models.SchemaType.SchemaColumn.SchemaColumnType.DURATION: _helpers.get_sdk_type_from_literal_type(
_primitives.Timedelta.to_flyte_literal_type()
),
_type_models.SchemaType.SchemaColumn.SchemaColumnType.STRING: _helpers.get_sdk_type_from_literal_type(
_primitives.String.to_flyte_literal_type()
),
}
return cls([(c.name, _PROTO_ENUM_TO_SDK_TYPE[c.type]) for c in model.columns])
def _set_columns(self, columns):
names_seen = set()
for column in columns:
if not isinstance(column, tuple):
raise _user_exceptions.FlyteValueException(
column,
"When specifying a Schema type with a known set of columns. Each column must be "
"specified as a tuple in the form ('name', type).",
)
if len(column) != 2:
raise _user_exceptions.FlyteValueException(
column,
"When specifying a Schema type with a known set of columns. Each column must be "
"specified as a tuple in the form ('name', type).",
)
name, sdk_type = column
sdk_type = _helpers.python_std_to_sdk_type(sdk_type)
if not isinstance(name, (str, _six.text_type)):
additional_msg = (
"When specifying a Schema type with a known set of columns, the first element in"
" each tuple must be text."
)
raise _user_exceptions.FlyteTypeException(
received_type=type(name),
received_value=name,
expected_type={str, _six.text_type},
additional_msg=additional_msg,
)
if (
not isinstance(sdk_type, _base_sdk_types.FlyteSdkType)
or sdk_type.to_flyte_literal_type() not in get_supported_literal_types_to_pandas_types()
):
additional_msg = (
"When specifying a Schema type with a known set of columns, the second element of "
"each tuple must be a supported type. Failed for column: {name}".format(name=name)
)
raise _user_exceptions.FlyteTypeException(
expected_type=list(get_supported_literal_types_to_pandas_types().keys()),
received_type=sdk_type,
additional_msg=additional_msg,
)
if name in names_seen:
raise ValueError(
"The column name {name} was specified multiple times when instantiating the "
"Schema.".format(name=name)
)
names_seen.add(name)
self._sdk_columns = _collections.OrderedDict(columns)
class Schema(_literal_models.Schema, metaclass=_sdk_bases.ExtendedSdkType):
def __init__(self, remote_path, mode="rb", schema_type=None):
"""
:param Text remote_path:
:param Text mode:
:param SchemaType schema_type: [Optional] If specified, the schema will be forced to conform to this type. If
not specified, the schema will be considered generic.
"""
self._mp_blob = _SchemaBackingMpBlob(remote_path, mode=mode)
super(Schema, self).__init__(self._mp_blob.uri, schema_type or SchemaType())
self._io_object = None
@classmethod
def promote_from_model(cls, model):
"""
:param flytekit.models.literals.Schema model:
:rtype: Schema
"""
return cls(model.uri, schema_type=SchemaType.promote_from_model(model.type))
@classmethod
@_exception_scopes.system_entry_point
def create_at_known_location(cls, known_remote_location, mode="wb", schema_type=None):
"""
:param Text known_remote_location: The location to which to write the object. Usually an s3 path.
:param Text mode:
:param SchemaType schema_type: [Optional] If specified, the schema will be forced to conform to this type. If
not specified, the schema will be considered generic.
:rtype: Schema
"""
return cls(known_remote_location, mode=mode, schema_type=schema_type)
@classmethod
@_exception_scopes.system_entry_point
def create_at_any_location(cls, mode="wb", schema_type=None):
"""
:param Text mode:
:param SchemaType schema_type: [Optional] If specified, the schema will be forced to conform to this type. If
not specified, the schema will be considered generic.
:rtype: Schema
"""
return cls.create_at_known_location(_data_proxy.Data.get_remote_path(), mode=mode, schema_type=schema_type)
@classmethod
@_exception_scopes.system_entry_point
def fetch(cls, remote_path, local_path=None, overwrite=False, mode="rb", schema_type=None):
"""
:param Text remote_path: The location from which to fetch the object. Usually an s3 path.
:param Text local_path: [Optional] A local path to which to download the object. If specified, the object
will not be managed and might not be cleaned up by the system upon exiting the context.
:param bool overwrite: If True, objects will be overwritten at the provided local_path in order to fetch this
object. Default is False.
:param Text mode: Read or write mode of the object.
:param SchemaType schema_type: [Optional] If specified, the schema will be forced to conform to this type. If
not specified, the schema will be considered generic.
:rtype: Schema
"""
schema = cls(remote_path, mode=mode, schema_type=schema_type)
schema.download(local_path=local_path, overwrite=overwrite)
return schema
@classmethod
@_exception_scopes.system_entry_point
def from_python_std(cls, t_value, schema_type=None):
"""
:param T t_value:
:param SchemaType schema_type: [Optional] If specified, we will ensure the resulting schema conforms to this type.
:rtype: Schema
"""
if isinstance(t_value, (str, _six.text_type)):
if _os.path.isdir(t_value):
schema = cls.create_at_any_location(schema_type=schema_type)
schema.multipart_blob._directory = _utils.Directory(t_value)
schema.upload()
else:
schema = cls.create_at_known_location(t_value, schema_type=schema_type)
return schema
elif isinstance(t_value, cls):
return t_value
elif isinstance(t_value, _pd.DataFrame):
# Accepts a pandas dataframe and converts to a Schema object
o = cls.create_at_any_location(schema_type=schema_type)
with o as w:
w.write(t_value)
return o
elif isinstance(t_value, list):
# Accepts a list of pandas dataframe and converts to a Schema object
o = cls.create_at_any_location(schema_type=schema_type)
with o as w:
for x in t_value:
if isinstance(x, _pd.DataFrame):
w.write(x)
else:
raise _user_exceptions.FlyteTypeException(
type(t_value),
{str, _six.text_type, Schema},
received_value=x,
additional_msg="A Schema object can only be create from a pandas DataFrame or a list of pandas DataFrame.",
)
return o
else:
raise _user_exceptions.FlyteTypeException(
type(t_value),
{str, _six.text_type, Schema},
received_value=t_value,
additional_msg="Unable to create Schema from user-provided value.",
)
@classmethod
def from_string(cls, string_value, schema_type=None):
"""
:param Text string_value:
:param SchemaType schema_type:
:rtype: Schema
"""
if not string_value:
raise _user_exceptions.FlyteValueException(string_value, "Cannot create a Schema from an empty path")
return cls.create_at_known_location(string_value, schema_type=schema_type)
@classmethod
@_exception_scopes.system_entry_point
def create_from_hive_query(
cls,
select_query,
stage_query=None,
schema_to_table_name_map=None,
schema_type=None,
known_location=None,
):
"""
Returns a query that can be submitted to Hive and produce the desired output. It also returns a properly-typed
schema object.
:param Text select_query: Query for selecting data from Hive
:param Text stage_query: Query for building temporary tables on Hive.
Runs before the select query. Temporary tables are supported but CTEs are not supported.
:param Dict[Text, Text] schema_to_table_name_map: A map of column names in the schema to the column names
returned from the select query
:param Text known_location: create the schema object at a known s3 location.
:param SchemaType schema_type: [Optional] If specified, the schema will be forced to conform to this type. If
not specified, the schema will be considered generic.
:return: Schema, Text
"""
schema_object = cls(
known_location or _data_proxy.Data.get_remote_directory(),
mode="wb",
schema_type=schema_type,
)
if len(schema_object.type.sdk_columns) > 0:
identity_dict = {n: n for n in _six.iterkeys(schema_object.type.sdk_columns)}
identity_dict.update(schema_to_table_name_map or {})
schema_to_table_name_map = identity_dict
columnar_clauses = []
for name, sdk_type in _six.iteritems(schema_object.type.sdk_columns):
if sdk_type == _primitives.Float:
columnar_clauses.append(
"CAST({table_column_name} as double) {schema_name}".format(
table_column_name=schema_to_table_name_map[name],
schema_name=name,
)
)
else:
columnar_clauses.append(
"{table_column_name} as {schema_name}".format(
table_column_name=schema_to_table_name_map[name],
schema_name=name,
)
)
columnar_query = ",\n\t\t".join(columnar_clauses)
else:
columnar_query = "*"
stage_query_str = _six.text_type(stage_query or "")
# the stage query should always end with a semicolon
stage_query_str = stage_query_str if stage_query_str.endswith(";") else (stage_query_str + ";")
query = _HIVE_QUERY_FORMATTER.format(
url=schema_object.remote_location,
stage_query_str=stage_query_str,
query_str=select_query.strip().strip(";"),
columnar_query=columnar_query,
table=_uuid.uuid4().hex,
)
return schema_object, query
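# Illustrative usage (hypothetical query and columns):
#   schema_object, query = Schema.create_from_hive_query(
#       select_query="SELECT a, b FROM some_table",
#       schema_type=SchemaType([("a", int), ("b", float)]),
#   )
#   # `query` is then submitted to Hive; the results land under schema_object.remote_location.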
@property
def local_path(self):
"""
Local filesystem path where the file was downloaded
:rtype: Text
"""
return self._mp_blob.local_path
@property
def remote_location(self):
"""
Path to where this MultiPartBlob will be synced. This is needed for reverse compatibility.
:rtype: Text
"""
return self.uri
@property
def remote_prefix(self):
"""
Path to where this MultiPartBlob will be synced. This is needed for reverse compatibility.
:rtype: Text
"""
return self.uri
@property
def uri(self):
"""
Path to where this MultiPartBlob will be synced.
:rtype: Text
"""
return self.multipart_blob.uri
@property
def mode(self):
"""
The mode string the MultiPartBlob is associated with.
:rtype: Text
"""
return self._mp_blob.mode
@property
def type(self):
"""
The schema type definition associated with this object.
:rtype: SchemaType
"""
return self._type
@property
def multipart_blob(self):
"""
:rtype: flytekit.common.types.impl.blobs.MultiPartBlob
"""
return self._mp_blob
@_exception_scopes.system_entry_point
def __enter__(self):
"""
:rtype: _SchemaIO
"""
if self._io_object is not None:
raise _user_exceptions.FlyteAssertion(
"The context of a schema can only be entered once at a time. Make sure the previous "
"'with' block has been exited."
)
self._mp_blob.__enter__()
if "r" in self.mode:
self._io_object = _SchemaReader(self, self.multipart_blob.directory)
else:
self._io_object = _SchemaWriter(self, self.multipart_blob.directory)
return self._io_object
@_exception_scopes.system_entry_point
def __exit__(self, exc_type, exc_val, exc_tb):
self._io_object = None
return self._mp_blob.__exit__(exc_type, exc_val, exc_tb)
def __repr__(self):
return "Schema({columns}) @ {location} ({mode})".format(
columns=self.type.columns,
location=self.remote_prefix,
mode="read-only" if "r" in self.mode else "write-only",
)
@_exception_scopes.system_entry_point
def download(self, local_path=None, overwrite=False):
"""
:param Text local_path: [Optional] A local path to which to download the object. If specified, the object
will not be managed and might not be cleaned up by the system upon exiting the context.
:param bool overwrite: If True, objects will be overwritten at the provided local_path in order to fetch this
object. Default is False.
:rtype: Schema
"""
self.multipart_blob.download(local_path=local_path, overwrite=overwrite)
@_exception_scopes.system_entry_point
def get_write_partition_to_hive_table_query(
self,
table_name,
partitions=None,
schema_to_table_name_map=None,
partitions_in_table=False,
append_to_partition=False,
):
"""
Returns a Hive query string that will update the metatable to point to the data as the new partition.
:param Text table_name:
:param dict[Text, T] partitions: A dictionary mapping table partition key names to the values matching this
partition.
:param dict[Text, Text] schema_to_table_name_map: Mapping of names in current schema to table in which it is
being inserted. Currently not supported. Must be None.
:param bool partitions_in_table: Whether or not the partition columns exist in the data being submitted.
Currently not supported. Must be false
:param bool append_to_partition: Whether or not to append new values to a partition. Currently not supported.
:return: Text
"""
partition_string = ""
where_string = ""
identity_dict = {n: n for n in _six.iterkeys(self.type.sdk_columns)}
identity_dict.update(schema_to_table_name_map or {})
schema_to_table_name_map = identity_dict
table_to_schema_name_map = {v: k for k, v in _six.iteritems(schema_to_table_name_map)}
if partitions:
partition_conditions = []
for partition_name, partition_value in _six.iteritems(partitions):
if not isinstance(partition_name, (str, _six.text_type)):
raise _user_exceptions.FlyteTypeException(
expected_type={str, _six.text_type},
received_type=type(partition_name),
received_value=partition_name,
additional_msg="All partition names must be type str.",
)
if type(partition_value) not in _ALLOWED_PARTITION_TYPES:
raise _user_exceptions.FlyteTypeException(
expected_type=_ALLOWED_PARTITION_TYPES,
received_type=type(partition_value),
received_value=partition_value,
additional_msg="Partition {name} has an unsupported type.".format(name=partition_name),
)
# We need the string to be quoted in the query, so let's take repr of it.
if isinstance(partition_value, (str, _six.text_type)):
partition_value = repr(partition_value)
partition_conditions.append(
"{partition_name} = {partition_value}".format(
partition_name=partition_name, partition_value=partition_value
)
)
partition_formatter = "PARTITION (\n\t{conditions}\n)"
partition_string = partition_formatter.format(conditions=",\n\t".join(partition_conditions))
if partitions_in_table and partitions:
where_clauses = []
for partition_name, partition_value in _six.iteritems(partitions):
where_clauses.append(
"\n\t\t{schema_name} = {value_str} AND ".format(
schema_name=table_to_schema_name_map[partition_name],
value_str=partition_value,
)
)
where_string = "WHERE\n\t\t{where_clauses}".format(where_clauses=" AND\n\t\t".join(where_clauses))
if where_string or partitions_in_table:
raise _user_exceptions.FlyteAssertion(
"Currently, the partition values should not be present in the schema pushed to Hive."
)
if append_to_partition:
raise _user_exceptions.FlyteAssertion(
"Currently, partitions can only be overwritten, they cannot be appended."
)
if not partitions:
raise _user_exceptions.FlyteAssertion(
"Currently, partition values MUST be specified for writing to a table."
)
return _format_insert_partition_query(
remote_location=self.remote_location,
table_name=table_name,
partition_string=partition_string,
)
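# Illustrative call (hypothetical table and partition):
#   schema.get_write_partition_to_hive_table_query("mydb.target_table", partitions={"ds": "2021-01-01"})
# returns "use mydb;" followed by ALTER TABLE statements that point
# PARTITION (ds = '2021-01-01') of target_table at this schema's remote_location.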
def compare_dataframe_to_schema(self, data_frame, column_subset=None, read=False):
"""
Do necessary type checking of a pandas data frame. Raise exception if it doesn't match.
:param pandas.DataFrame data_frame: data frame to type check
:param list[Text] column_subset:
:param bool read: Used to alter error message for more clarity.
"""
all_columns = list(data_frame.columns.values)
schema_column_names = list(self.type.sdk_columns.keys())
# Skip checking if we have a generic schema type (no specified columns)
if not schema_column_names:
return
# If we specify a subset of columns, ensure they all exist and then only take those columns
if column_subset is not None:
schema_column_names = []
failed_columns = []
for column in column_subset:
if column not in self.type.sdk_columns:
failed_columns.append(column)
else:
schema_column_names.append(column)
if len(failed_columns) > 0:
additional_msg = ""
raise _user_exceptions.FlyteAssertion(
"{} was/where requested but could not be found in the schema: {}.{}".format(
failed_columns, self.type.sdk_columns, additional_msg
)
)
if not all(c in all_columns for c in schema_column_names):
raise _user_exceptions.FlyteTypeException(
expected_type=self.type.sdk_columns,
received_type=data_frame.columns,
additional_msg="Mismatch between the data frame's column names {} and schema's column names {} "
"with strict_names=True.".format(all_columns, schema_column_names),
)
# This only iterates if the Schema has specified columns.
for name in schema_column_names:
literal_type = self.type.sdk_columns[name].to_flyte_literal_type()
dtype = data_frame[name].dtype
# TODO np.issubdtype is deprecated. Replace it
if all(
not _np.issubdtype(dtype, allowed_type)
for allowed_type in get_supported_literal_types_to_pandas_types()[literal_type]
):
if read:
read_or_write_msg = "read data frame object from schema"
else:
read_or_write_msg = "write data frame object to schema"
additional_msg = (
"Cannot {read_write} because the types do not match. Column "
"'{name}' did not pass type checking. Note: If your "
"column contains null values, the types might not transition as expected between parquet and "
"pandas. For more information, see: "
"http://arrow.apache.org/docs/python/pandas.html#arrow-pandas-conversion".format(
read_write=read_or_write_msg, name=name
)
)
raise _user_exceptions.FlyteTypeException(
expected_type=get_supported_literal_types_to_pandas_types()[literal_type],
received_type=dtype,
additional_msg=additional_msg,
)
def cast_to(self, other_type):
"""
:param SchemaType other_type:
:rtype: Schema
"""
if len(other_type.sdk_columns) > 0:
for k, v in _six.iteritems(other_type.sdk_columns):
if k not in self.type.sdk_columns:
raise _user_exceptions.FlyteTypeException(
self.type,
other_type,
additional_msg="Cannot cast because a required column '{}' was not found.".format(k),
received_value=self,
)
if (
not isinstance(v, _base_sdk_types.FlyteSdkType)
or v.to_flyte_literal_type() != self.type.sdk_columns[k].to_flyte_literal_type()
):
raise _user_exceptions.FlyteTypeException(
self.type.sdk_columns[k],
v,
additional_msg="Cannot cast because the column type for column '{}' does not match.".format(k),
)
return Schema(self.remote_location, mode=self.mode, schema_type=other_type)
@_exception_scopes.system_entry_point
def upload(self):
"""
Upload the schema to the remote location
"""
if "w" not in self.mode:
raise _user_exceptions.FlyteAssertion("Cannot upload a read-only schema!")
elif not self.local_path:
raise _user_exceptions.FlyteAssertion(
"The schema is not currently backed by a local directory "
"and therefore cannot be uploaded. Please write to this before "
"attempting an upload."
)
else:
# TODO: Introduce system logging
# logging.info("Putting {} -> {}".format(self.local_path, self.remote_location))
_data_proxy.Data.put_data(self.local_path, self.remote_location, is_multipart=True)
|
the-stack_106_28150 | # coding: utf-8
# /*##########################################################################
# Copyright (C) 2004-2016 V.A. Sole, European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ######################################################################### */
"""This module defines widgets used to build a fit configuration dialog.
The resulting dialog widget outputs a dictionary of configuration parameters.
"""
from silx.gui import qt
__authors__ = ["P. Knobel"]
__license__ = "MIT"
__date__ = "30/11/2016"
class TabsDialog(qt.QDialog):
"""Dialog widget containing a QTabWidget :attr:`tabWidget`
and buttons:
# - buttonHelp
- buttonDefault
- buttonOk
- buttonCancel
This dialog defines a __len__ returning the number of tabs,
and an __iter__ method yielding the tab widgets.
"""
def __init__(self, parent=None):
qt.QDialog.__init__(self, parent)
self.tabWidget = qt.QTabWidget(self)
layout = qt.QVBoxLayout(self)
layout.addWidget(self.tabWidget)
layout2 = qt.QHBoxLayout(None)
# self.buttonHelp = qt.QPushButton(self)
# self.buttonHelp.setText("Help")
# layout2.addWidget(self.buttonHelp)
self.buttonDefault = qt.QPushButton(self)
self.buttonDefault.setText("Default")
layout2.addWidget(self.buttonDefault)
spacer = qt.QSpacerItem(20, 20,
qt.QSizePolicy.Expanding,
qt.QSizePolicy.Minimum)
layout2.addItem(spacer)
self.buttonOk = qt.QPushButton(self)
self.buttonOk.setText("OK")
layout2.addWidget(self.buttonOk)
self.buttonCancel = qt.QPushButton(self)
self.buttonCancel.setText("Cancel")
layout2.addWidget(self.buttonCancel)
layout.addLayout(layout2)
self.buttonOk.clicked.connect(self.accept)
self.buttonCancel.clicked.connect(self.reject)
def __len__(self):
"""Return number of tabs"""
return self.tabWidget.count()
def __iter__(self):
"""Return the next tab widget in :attr:`tabWidget` every
time this method is called.
:return: Tab widget
:rtype: QWidget
"""
for widget_index in range(len(self)):
yield self.tabWidget.widget(widget_index)
def addTab(self, page, label):
"""Add a new tab
:param page: Content of new page. Must be a widget with
a get() method returning a dictionary.
:param str label: Tab label
"""
self.tabWidget.addTab(page, label)
def getTabLabels(self):
"""
Return a list of all tab labels in :attr:`tabWidget`
"""
return [self.tabWidget.tabText(i) for i in range(len(self))]
class TabsDialogData(TabsDialog):
"""This dialog adds a data attribute to :class:`TabsDialog`.
Data input in widgets, such as text entries or checkboxes, is stored in an
attribute :attr:`output` when the user clicks the OK button.
A default dictionary can be supplied when this dialog is initialized, to
be used as default data for :attr:`output`.
"""
def __init__(self, parent=None, modal=True, default=None):
"""
:param parent: Parent :class:`QWidget`
:param modal: If `True`, dialog is modal, meaning this dialog remains
in front of its parent window and disables it until the user is
done interacting with the dialog
:param default: Default dictionary, used to initialize and reset
:attr:`output`.
"""
TabsDialog.__init__(self, parent)
self.setModal(modal)
self.setWindowTitle("Fit configuration")
self.output = {}
self.default = {} if default is None else default
self.buttonDefault.clicked.connect(self.setDefault)
# self.keyPressEvent(qt.Qt.Key_Enter).
def keyPressEvent(self, event):
"""Redefining this method to ignore Enter key
(for some reason it activates buttonDefault callback which
resets all widgets)
"""
if event.key() in [qt.Qt.Key_Enter, qt.Qt.Key_Return]:
return
TabsDialog.keyPressEvent(self, event)
def accept(self):
"""When *OK* is clicked, update :attr:`output` with data from
various widgets
"""
self.output.update(self.default)
# loop over all tab widgets (uses TabsDialog.__iter__)
for tabWidget in self:
self.output.update(tabWidget.get())
# avoid pathological None cases
for key in self.output.keys():
if self.output[key] is None:
if key in self.default:
self.output[key] = self.default[key]
super(TabsDialogData, self).accept()
def reject(self):
"""When the *Cancel* button is clicked, reinitialize :attr:`output`
and quit
"""
self.setDefault()
super(TabsDialogData, self).reject()
def setDefault(self, newdefault=None):
"""Reinitialize :attr:`output` with :attr:`default` or with
new dictionary ``newdefault`` if provided.
Call :meth:`setDefault` for each tab widget, if available.
"""
self.output = {}
if newdefault is None:
newdefault = self.default
else:
self.default = newdefault
self.output.update(newdefault)
for tabWidget in self:
if hasattr(tabWidget, "setDefault"):
tabWidget.setDefault(self.output)
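# Minimal usage sketch (assumes tab pages exposing a get() dict, e.g. the ConstraintsPage below):
#   dialog = TabsDialogData(default={"PositiveFwhmFlag": True})
#   dialog.addTab(ConstraintsPage(), label="Constraints")
#   if dialog.exec_():               # accepted via the OK button
#       config = dialog.output       # defaults merged with every tab's get() dict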
class ConstraintsPage(qt.QGroupBox):
"""Checkable QGroupBox widget filled with QCheckBox widgets,
to configure the fit estimation for standard fit theories.
"""
def __init__(self, parent=None, title="Set constraints"):
super(ConstraintsPage, self).__init__(parent)
self.setTitle(title)
self.setToolTip("Disable 'Set constraints' to remove all " +
"constraints on all fit parameters")
self.setCheckable(True)
layout = qt.QVBoxLayout(self)
self.setLayout(layout)
self.positiveHeightCB = qt.QCheckBox("Force positive height/area", self)
self.positiveHeightCB.setToolTip("Fit must find positive peaks")
layout.addWidget(self.positiveHeightCB)
self.positionInIntervalCB = qt.QCheckBox("Force position in interval", self)
self.positionInIntervalCB.setToolTip(
"Fit must position peak within X limits")
layout.addWidget(self.positionInIntervalCB)
self.positiveFwhmCB = qt.QCheckBox("Force positive FWHM", self)
self.positiveFwhmCB.setToolTip("Fit must find a positive FWHM")
layout.addWidget(self.positiveFwhmCB)
self.sameFwhmCB = qt.QCheckBox("Force same FWHM for all peaks", self)
self.sameFwhmCB.setToolTip("Fit must find same FWHM for all peaks")
layout.addWidget(self.sameFwhmCB)
self.quotedEtaCB = qt.QCheckBox("Force Eta between 0 and 1", self)
self.quotedEtaCB.setToolTip(
"Fit must find Eta between 0 and 1 for pseudo-Voigt function")
layout.addWidget(self.quotedEtaCB)
layout.addStretch()
self.setDefault()
def setDefault(self, default_dict=None):
"""Set default state for all widgets.
:param default_dict: If a default config dictionary is provided as
a parameter, its values are used as default state."""
if default_dict is None:
default_dict = {}
# this one uses reverse logic: if checked, NoConstraintsFlag must be False
self.setChecked(
not default_dict.get('NoConstraintsFlag', False))
self.positiveHeightCB.setChecked(
default_dict.get('PositiveHeightAreaFlag', True))
self.positionInIntervalCB.setChecked(
default_dict.get('QuotedPositionFlag', False))
self.positiveFwhmCB.setChecked(
default_dict.get('PositiveFwhmFlag', True))
self.sameFwhmCB.setChecked(
default_dict.get('SameFwhmFlag', False))
self.quotedEtaCB.setChecked(
default_dict.get('QuotedEtaFlag', False))
def get(self):
"""Return a dictionary of constraint flags, to be processed by the
:meth:`configure` method of the selected fit theory."""
ddict = {
'NoConstraintsFlag': not self.isChecked(),
'PositiveHeightAreaFlag': self.positiveHeightCB.isChecked(),
'QuotedPositionFlag': self.positionInIntervalCB.isChecked(),
'PositiveFwhmFlag': self.positiveFwhmCB.isChecked(),
'SameFwhmFlag': self.sameFwhmCB.isChecked(),
'QuotedEtaFlag': self.quotedEtaCB.isChecked(),
}
return ddict
class SearchPage(qt.QWidget):
def __init__(self, parent=None):
super(SearchPage, self).__init__(parent)
layout = qt.QVBoxLayout(self)
self.manualFwhmGB = qt.QGroupBox("Define FWHM manually", self)
self.manualFwhmGB.setCheckable(True)
self.manualFwhmGB.setToolTip(
"If disabled, the FWHM parameter used for peak search is " +
"estimated based on the highest peak in the data")
layout.addWidget(self.manualFwhmGB)
# ------------ GroupBox fwhm--------------------------
layout2 = qt.QHBoxLayout(self.manualFwhmGB)
self.manualFwhmGB.setLayout(layout2)
label = qt.QLabel("Fwhm Points", self.manualFwhmGB)
layout2.addWidget(label)
self.fwhmPointsSpin = qt.QSpinBox(self.manualFwhmGB)
self.fwhmPointsSpin.setRange(0, 999999)
self.fwhmPointsSpin.setToolTip("Typical peak fwhm (number of data points)")
layout2.addWidget(self.fwhmPointsSpin)
# ----------------------------------------------------
self.manualScalingGB = qt.QGroupBox("Define scaling manually", self)
self.manualScalingGB.setCheckable(True)
self.manualScalingGB.setToolTip(
"If disabled, the Y scaling used for peak search is " +
"estimated automatically")
layout.addWidget(self.manualScalingGB)
# ------------ GroupBox scaling-----------------------
layout3 = qt.QHBoxLayout(self.manualScalingGB)
self.manualScalingGB.setLayout(layout3)
label = qt.QLabel("Y Scaling", self.manualScalingGB)
layout3.addWidget(label)
self.yScalingEntry = qt.QLineEdit(self.manualScalingGB)
self.yScalingEntry.setToolTip(
"Data values will be multiplied by this value prior to peak" +
" search")
self.yScalingEntry.setValidator(qt.QDoubleValidator(self))
layout3.addWidget(self.yScalingEntry)
# ----------------------------------------------------
# ------------------- grid layout --------------------
containerWidget = qt.QWidget(self)
layout4 = qt.QHBoxLayout(containerWidget)
containerWidget.setLayout(layout4)
label = qt.QLabel("Sensitivity", containerWidget)
layout4.addWidget(label)
self.sensitivityEntry = qt.QLineEdit(containerWidget)
self.sensitivityEntry.setToolTip(
"Peak search sensitivity threshold, expressed as a multiple " +
"of the standard deviation of the noise.\nMinimum value is 1 " +
"(to be detected, peak must be higher than the estimated noise)")
sensivalidator = qt.QDoubleValidator(self)
sensivalidator.setBottom(1.0)
self.sensitivityEntry.setValidator(sensivalidator)
layout4.addWidget(self.sensitivityEntry)
# ----------------------------------------------------
layout.addWidget(containerWidget)
self.forcePeakPresenceCB = qt.QCheckBox("Force peak presence", self)
self.forcePeakPresenceCB.setToolTip(
"If peak search algorithm is unsuccessful, place one peak " +
"at the maximum of the curve")
layout.addWidget(self.forcePeakPresenceCB)
layout.addStretch()
self.setDefault()
def setDefault(self, default_dict=None):
"""Set default values for all widgets.
:param default_dict: If a default config dictionary is provided as
a parameter, its values are used as default values."""
if default_dict is None:
default_dict = {}
self.manualFwhmGB.setChecked(
not default_dict.get('AutoFwhm', True))
self.fwhmPointsSpin.setValue(
default_dict.get('FwhmPoints', 8))
self.sensitivityEntry.setText(
str(default_dict.get('Sensitivity', 1.0)))
self.manualScalingGB.setChecked(
not default_dict.get('AutoScaling', False))
self.yScalingEntry.setText(
str(default_dict.get('Yscaling', 1.0)))
self.forcePeakPresenceCB.setChecked(
default_dict.get('ForcePeakPresence', False))
def get(self):
"""Return a dictionary of peak search parameters, to be processed by
the :meth:`configure` method of the selected fit theory."""
ddict = {
'AutoFwhm': not self.manualFwhmGB.isChecked(),
'FwhmPoints': self.fwhmPointsSpin.value(),
'Sensitivity': safe_float(self.sensitivityEntry.text()),
'AutoScaling': not self.manualScalingGB.isChecked(),
'Yscaling': safe_float(self.yScalingEntry.text()),
'ForcePeakPresence': self.forcePeakPresenceCB.isChecked()
}
return ddict
class BackgroundPage(qt.QGroupBox):
"""Background subtraction configuration, specific to fittheories
estimation functions."""
def __init__(self, parent=None,
title="Subtract strip background prior to estimation"):
super(BackgroundPage, self).__init__(parent)
self.setTitle(title)
self.setCheckable(True)
self.setToolTip(
"The strip algorithm strips away peaks to compute the " +
"background signal.\nAt each iteration, a sample is compared " +
"to the average of the two samples at a given distance in both" +
" directions,\n and if its value is higher than the average,"
"it is replaced by the average.")
layout = qt.QGridLayout(self)
self.setLayout(layout)
for i, label_text in enumerate(
["Strip width (in samples)",
"Number of iterations",
"Strip threshold factor"]):
label = qt.QLabel(label_text)
layout.addWidget(label, i, 0)
self.stripWidthSpin = qt.QSpinBox(self)
self.stripWidthSpin.setToolTip(
"Width, in number of samples, of the strip operator")
self.stripWidthSpin.setRange(1, 999999)
layout.addWidget(self.stripWidthSpin, 0, 1)
self.numIterationsSpin = qt.QSpinBox(self)
self.numIterationsSpin.setToolTip(
"Number of iterations of the strip algorithm")
self.numIterationsSpin.setRange(1, 999999)
layout.addWidget(self.numIterationsSpin, 1, 1)
self.thresholdFactorEntry = qt.QLineEdit(self)
self.thresholdFactorEntry.setToolTip(
"Factor used by the strip algorithm to decide whether a sample" +
"value should be stripped.\nThe value must be higher than the " +
"average of the 2 samples at +- w times this factor.\n")
self.thresholdFactorEntry.setValidator(qt.QDoubleValidator(self))
layout.addWidget(self.thresholdFactorEntry, 2, 1)
self.smoothStripGB = qt.QGroupBox("Apply smoothing prior to strip", self)
self.smoothStripGB.setCheckable(True)
self.smoothStripGB.setToolTip(
"Apply a smoothing before subtracting strip background" +
" in fit and estimate processes")
smoothlayout = qt.QHBoxLayout(self.smoothStripGB)
label = qt.QLabel("Smoothing width (Savitsky-Golay)")
smoothlayout.addWidget(label)
self.smoothingWidthSpin = qt.QSpinBox(self)
self.smoothingWidthSpin.setToolTip(
"Width parameter for Savitsky-Golay smoothing (number of samples, must be odd)")
self.smoothingWidthSpin.setRange(3, 101)
self.smoothingWidthSpin.setSingleStep(2)
smoothlayout.addWidget(self.smoothingWidthSpin)
layout.addWidget(self.smoothStripGB, 3, 0, 1, 2)
layout.setRowStretch(4, 1)
self.setDefault()
def setDefault(self, default_dict=None):
"""Set default values for all widgets.
:param default_dict: If a default config dictionary is provided as
a parameter, its values are used as default values."""
if default_dict is None:
default_dict = {}
self.setChecked(
default_dict.get('StripBackgroundFlag', True))
self.stripWidthSpin.setValue(
default_dict.get('StripWidth', 2))
self.numIterationsSpin.setValue(
default_dict.get('StripIterations', 5000))
self.thresholdFactorEntry.setText(
str(default_dict.get('StripThreshold', 1.0)))
self.smoothStripGB.setChecked(
default_dict.get('SmoothingFlag', False))
self.smoothingWidthSpin.setValue(
default_dict.get('SmoothingWidth', 3))
def get(self):
"""Return a dictionary of background subtraction parameters, to be
processed by the :meth:`configure` method of the selected fit theory.
"""
ddict = {
'StripBackgroundFlag': self.isChecked(),
'StripWidth': self.stripWidthSpin.value(),
'StripIterations': self.numIterationsSpin.value(),
'StripThreshold': safe_float(self.thresholdFactorEntry.text()),
'SmoothingFlag': self.smoothStripGB.isChecked(),
'SmoothingWidth': self.smoothingWidthSpin.value()
}
return ddict
def safe_float(string_, default=1.0):
"""Convert a string into a float.
If the conversion fails, return the default value.
"""
try:
ret = float(string_)
except ValueError:
return default
else:
return ret
def safe_int(string_, default=1):
"""Convert a string into a integer.
If the conversion fails, return the default value.
"""
try:
ret = int(float(string_))
except ValueError:
return default
else:
return ret
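# Usage sketch (illustrative, not part of the original module): both helpers fall back
# to their default value instead of raising when the widget text is not numeric.
#   >>> safe_float("2.5")
#   2.5
#   >>> safe_float("abc", default=1.0)
#   1.0
#   >>> safe_int("3.9")
#   3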
def getFitConfigDialog(parent=None, default=None, modal=True):
"""Instantiate and return a fit configuration dialog, adapted
for configuring standard fit theories from
:mod:`silx.math.fit.fittheories`.
:return: Instance of :class:`TabsDialogData` with 3 tabs:
:class:`ConstraintsPage`, :class:`SearchPage` and
:class:`BackgroundPage`
"""
tdd = TabsDialogData(parent=parent, default=default)
tdd.addTab(ConstraintsPage(), label="Constraints")
tdd.addTab(SearchPage(), label="Peak search")
tdd.addTab(BackgroundPage(), label="Background")
# apply default to newly added pages
tdd.setDefault()
return tdd
def main():
a = qt.QApplication([])
mw = qt.QMainWindow()
mw.show()
tdd = getFitConfigDialog(mw, default={"a": 1})
tdd.show()
tdd.exec_()
print("TabsDialogData result: ", tdd.result())
print("TabsDialogData output: ", tdd.output)
a.exec_()
if __name__ == "__main__":
main()
|
the-stack_106_28151 | import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
import json
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
import pickle
import warnings
# Load modules for network partitioning experiments
import community
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community.asyn_fluid import asyn_fluidc
from sklearn import metrics
from infomap import Infomap
warnings.filterwarnings("ignore")
# dictionaries for holding results
scores = {}
runtimes = {}
avetimes = {}
# load data
f = open('data/wikicats.p', 'rb')
database = pickle.load(f)
f.close()
dG = database['G']
labels = database['labels']
num_nodes = dG.number_of_nodes()
num_partitions = len(np.unique(labels))
idx2node = {}
for n in dG.nodes:
idx2node[n] = n
G = dG.to_undirected()
# create noisy version
nG = nx.Graph()
ndG = nx.DiGraph()
for n in G.nodes:
nG.add_node(n)
for n in dG.nodes:
ndG.add_node(n)
for e in G.edges:
nG.add_edge(e[0],e[1])
for e in dG.edges:
ndG.add_edge(e[0],e[1])
start_edges = nx.number_of_edges(nG)
# add noise
for j in range(int( 0.1*G.number_of_edges() )):
x1 = int(num_nodes * np.random.rand())
x2 = int(num_nodes * np.random.rand())
if database['labels'][x1] != database['labels'][x2]:
nG.add_edge(x1, x2)
ndG.add_edge(x1,x2)
print('---{:3d} edges in raw version \n'.format(G.number_of_edges()))
print('---Added {:d} edges to create noisy version \n'.format(nx.number_of_edges(nG)-start_edges))
print('---Data files loaded. Computing...\n')
def process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-7,verbose=False):
p_s = np.zeros((num_nodes, 1))
p_s[:, 0] = np.sum(cost, axis=1) ** 0.001
p_s /= np.sum(p_s)
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=num_partitions)
ot_dict = {'loss_type': 'L2', # the key hyperparameters of GW distance
'ot_method': 'proximal',
'beta': beta,
'outer_iteration': 300,
# outer, inner iteration, error bound of optimal transport
'iter_bound': 1e-30,
'inner_iteration': 1,
'sk_bound': 1e-30,
'node_prior': 0,
'max_iter': 200, # iteration and error bound for calcuating barycenter
'cost_bound': 1e-16,
'update_p': False, # optional updates of source distribution
'lr': 0,
'alpha': 0}
sub_costs, sub_probs, sub_idx2nodes, trans = GwGt.graph_partition(cost,
p_s,
p_t,
idx2node,
ot_dict)
est_idx = np.argmax(trans, axis=1)
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
if verbose:
print('---Mutual information score = {:3.3f}'.format(mutual_info))
return mutual_info
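# Illustrative call pattern (a sketch mirroring the benchmark runs below): `cost` can be
# a raw adjacency matrix or a heat-kernel matrix, and `beta` is the proximal
# regularization weight forwarded to the S-GWL partitioner.
#   cost = nx.adjacency_matrix(G).toarray().astype(np.float64)
#   ami = process_sgwl_wiki(cost, database, num_nodes, num_partitions, beta=2e-7, verbose=True)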
"""
###########################################################
###########################################################
# Method: Fluid communities (symmetrized)
###########################################################
# Raw data
if not nx.is_connected(G):
#print('---Fluid community requires connected graph, skipping raw version---')
scores['fluid-symmetrized-raw'] = 'failed'
runtimes['fluid-symmetrized-raw'] = 'failed'
else:
time_s = time.time()
comp = asyn_fluidc(G.to_undirected(), k=num_partitions)
list_nodes = [frozenset(c) for c in comp]
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['fluid-symmetrized-raw'] = mutual_info
runtimes['fluid-symmetrized-raw'] = runtime
# Noisy data
if not nx.is_connected(nG):
print('---Fluid community requires connected graph, skipping noisy version---')
scores['fluid-symmetrized-noisy'] = 'failed'
runtimes['fluid-symmetrized-noisy'] = 'failed'
else:
time_s = time.time()
comp = asyn_fluidc(nG.to_undirected(), k=num_partitions)
list_nodes = [frozenset(c) for c in comp]
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['fluid-symmetrized-noisy'] = mutual_info
runtimes['fluid-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: FastGreedy (symmetrized)
###########################################################
# Raw
time_s = time.time()
list_nodes = list(greedy_modularity_communities(G))
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['fastgreedy-symmetrized-raw'] = mutual_info
runtimes['fastgreedy-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
list_nodes = list(greedy_modularity_communities(nG))
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['fastgreedy-symmetrized-noisy'] = mutual_info
runtimes['fastgreedy-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Louvain (symmetrized)
###########################################################
# Raw
time_s = time.time()
partition = community.best_partition(G)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
for idx in list_nodes:
est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['louvain-symmetrized-raw'] = mutual_info
runtimes['louvain-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
partition = community.best_partition(nG)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
for idx in list_nodes:
est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['louvain-symmetrized-noisy'] = mutual_info
runtimes['louvain-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (symmetrized)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in G.nodes:
im.add_node(node)
for edge in G.edges:
im.add_link(edge[0], edge[1])
im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['infomap-symmetrized-raw'] = mutual_info
runtimes['infomap-symmetrized-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in nG.nodes:
im.add_node(node)
for edge in nG.edges:
im.add_link(edge[0], edge[1])
im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['infomap-symmetrized-noisy'] = mutual_info
runtimes['infomap-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (asymmetric)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in dG.nodes:
im.add_node(node)
for edge in dG.edges:
im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['infomap-asymmetric-raw'] = mutual_info
runtimes['infomap-asymmetric-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in ndG.nodes:
im.add_node(node)
for edge in ndG.edges:
im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx)
scores['infomap-asymmetric-noisy'] = mutual_info
runtimes['infomap-asymmetric-noisy'] = runtime
"""
###########################################################
###########################################################
# Method: GWL, symmetrized
###########################################################
# Raw
start = time.time()
cost = nx.adjacency_matrix(G).toarray().astype(np.float64)
mutual_info = process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-7);
end = time.time()
scores['gwl-symmetrized-raw'] = mutual_info
runtimes['gwl-symmetrized-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(nG).toarray().astype(np.float64)
mutual_info = process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-7);
end = time.time()
scores['gwl-symmetrized-noisy'] = mutual_info
runtimes['gwl-symmetrized-noisy'] = end-start
###########################################################
###########################################################
# Method: GWL, asymmetric
###########################################################
# Raw
start = time.time()
cost = nx.adjacency_matrix(dG).toarray().astype(np.float64)/num_nodes
mutual_info = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-raw'] = mutual_info
runtimes['gwl-asymmetric-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(ndG).toarray().astype(np.float64)/num_nodes
mutual_info = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-noisy'] = mutual_info
runtimes['gwl-asymmetric-noisy'] = end-start
###########################################################
###########################################################
# Proposed method: SpecGWL (symmetrized)
###########################################################
# Raw
mis = []
rt = []
ts = [24.7]#np.logspace(-1,2,100)
for t in ts:
start = time.time()
cost = sgw.undirected_normalized_heat_kernel(G,t).astype(np.float64)
mutual_info = process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-5);
mis.append(mutual_info)
end = time.time()
rt.append(end-start)
# print('--- Raw data | Symmetrized | SpecGWL | Best mutual information score: {:3.3f} | @t = {:3.3f} | average runtime per iteration = {:3.3f}'.format(max(mis), ts[np.argmax(mis)], np.mean(rt)))
scores['specgwl-symmetrized-raw'] = max(mis)
runtimes['specgwl-symmetrized-raw'] = sum(rt)
# avetimes['specgwl-symmetrized-raw'] = np.mean(rt)
# Noisy
mis = []
rt = []
ts = [39.4]#np.logspace(-1,2,100)
for t in ts:
start = time.time()
cost = sgw.undirected_normalized_heat_kernel(nG,t).astype(np.float64)
mi = process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-5);
mis.append(mi)
end = time.time()
rt.append(end-start)
# print('--- Noisy data | Symmetrized | SpecGWL | Best mutual information score: {:3.3f} | @t = {:3.3f} | average runtime per iteration = {:3.3f}'.format(max(mis), ts[np.argmax(mis)], np.mean(rt)))
scores['specgwl-symmetrized-noisy'] = max(mis)
runtimes['specgwl-symmetrized-noisy'] = sum(rt)
# avetimes['specgwl-symmetrized-noisy'] = np.mean(rt)
###########################################################
###########################################################
# Proposed method: SpecGWL (asymmetric)
###########################################################
# Raw
mis = []
rt = []
ts = [4]#np.logspace(-1,2,100)
for t in ts:
start = time.time()
cost = sgw.directed_heat_kernel(dG,t).astype(np.float64)
mutual_info = process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-5);
mis.append(mutual_info)
end = time.time()
rt.append(end-start)
# print('--- Raw data | Asymmetric | SpecGWL | Best mutual information score: {:3.3f} | @t = {:3.3f} | average runtime per iteration = {:3.3f}'.format(max(mis), ts[np.argmax(mis)], np.mean(rt)))
scores['specgwl-asymmetric-raw'] = max(mis)
runtimes['specgwl-asymmetric-raw'] = sum(rt)
# avetimes['specgwl-asymmetric-raw'] = np.mean(rt)
# Noisy
mis = []
rt = []
ts = [2.3]#np.logspace(-1,2,100)
for t in ts:
start = time.time()
cost = sgw.directed_heat_kernel(ndG,t).astype(np.float64)
mi = process_sgwl_wiki(cost,database,num_nodes,num_partitions,beta=2e-5);
mis.append(mi)
end = time.time()
rt.append(end-start)
# print('--- Noisy data | Asymmetric | SpecGWL | Best mutual information score: {:3.3f} | @t = {:3.3f} | average runtime per iteration = {:3.3f}'.format(max(mis), ts[np.argmax(mis)], np.mean(rt)))
scores['specgwl-asymmetric-noisy'] = max(mis)
runtimes['specgwl-asymmetric-noisy'] = sum(rt)
# avetimes['specgwl-asymmetric-noisy'] = np.mean(rt)
print('Mutual information scores')
print(json.dumps(scores,indent=1))
print('Runtimes')
print(json.dumps(runtimes,indent=1))
# print('Average runtime of SpecGWL')
# print(json.dumps(avetimes,indent=1))
with open('res_benchmark_regularized_wikicats.txt', 'w') as outfile:
json.dump(['Adjusted mutual information scores',
scores,
'Runtimes',
runtimes], outfile,indent=1) |
the-stack_106_28152 | """
Fine-tune Faster R-CNN on HICO-DET
Fred Zhang <[email protected]>
The Australian National University
Australian Centre for Robotic Vision
"""
import os
import math
import json
import copy
import time
import torch
import bisect
import argparse
import torchvision
import numpy as np
from tqdm import tqdm
from itertools import repeat, chain
from collections import defaultdict
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler, BatchSampler
import pocket
from pocket.data import HICODet, DatasetConcat
from pocket.ops import RandomHorizontalFlip, to_tensor
class DetectorEngine(pocket.core.LearningEngine):
def __init__(self, net, train_loader, val_loader, **kwargs):
super().__init__(net, None, train_loader, **kwargs)
self._val_loader = val_loader
self.timer = pocket.utils.HandyTimer(1)
def _on_each_iteration(self):
self._state.output = self._state.net(*self._state.inputs, targets=self._state.targets)
self._state.loss = sum(loss for loss in self._state.output.values())
self._state.optimizer.zero_grad()
self._state.loss.backward()
self._state.optimizer.step()
def _on_end_epoch(self):
with self.timer:
ap, max_rec = self.validate()
print("\n=> Validation (+{:.2f})\n"
"Epoch: {} | mAP: {:.4f}, mRec: {:.4f} | Time: {:.2f}s\n".format(
time.time() - self._dawn, self._state.epoch,
ap.mean().item(), max_rec.mean().item(), self.timer[0]
))
super()._on_end_epoch()
@torch.no_grad()
def validate(self, min_iou=0.5, nms_thresh=0.5):
num_gt = torch.zeros(80)
associate = pocket.utils.BoxAssociation(min_iou=min_iou)
meter = pocket.utils.DetectionAPMeter(
80, algorithm='INT', nproc=10
)
self._state.net.eval()
for batch in tqdm(self._val_loader):
inputs = pocket.ops.relocate_to_cuda(batch[0])
output = self._state.net(inputs)
assert len(output) == 1, "The batch size should be one"
# Relocate back to cpu
output = pocket.ops.relocate_to_cpu(output[0])
target = batch[1][0]
# Do NMS on ground truth boxes
# NOTE This is because certain objects appear multiple times in
# different pairs and different interactions
keep_gt_idx = torchvision.ops.boxes.batched_nms(
target['boxes'], torch.ones_like(target['labels']).float(),
target['labels'], nms_thresh
)
gt_boxes = target['boxes'][keep_gt_idx].view(-1, 4)
gt_classes = target['labels'][keep_gt_idx].view(-1)
# Update the number of ground truth instances
# Convert the object index to zero based
for c in gt_classes:
num_gt[c - 1] += 1
# Associate detections with ground truth
binary_labels = torch.zeros_like(output['scores'])
unique_obj = output['labels'].unique()
for obj_idx in unique_obj:
det_idx = torch.nonzero(output['labels'] == obj_idx).squeeze(1)
gt_idx = torch.nonzero(gt_classes == obj_idx).squeeze(1)
if len(gt_idx) == 0:
continue
binary_labels[det_idx] = associate(
gt_boxes[gt_idx].view(-1, 4),
output['boxes'][det_idx].view(-1, 4),
output['scores'][det_idx].view(-1)
)
meter.append(output['scores'], output['labels'] - 1, binary_labels)
meter.num_gt = num_gt.tolist()
ap = meter.eval()
return ap, meter.max_rec
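# Minimal usage sketch (mirrors main() below; concrete argument values are illustrative):
#   engine = DetectorEngine(net, train_loader, val_loader, cache_dir="./checkpoints")
#   engine(num_epochs)  # runs training; validation happens in _on_end_epoch above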
class HICODetObject(Dataset):
def __init__(self, dataset, data_root, nms_thresh=0.5, random_flip=False):
self.dataset = dataset
self.nms_thresh = nms_thresh
with open(os.path.join(data_root, 'coco80tohico80.json'), 'r') as f:
corr = json.load(f)
self.hico2coco91 = dict(zip(corr.values(), corr.keys()))
self.transform = RandomHorizontalFlip() if random_flip else None
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
image, target = self.dataset[idx]
boxes = torch.cat([
target['boxes_h'],
target['boxes_o']
])
# Convert ground truth boxes to zero-based index and the
# representation from pixel indices to coordinates
boxes[:, :2] -= 1
labels = torch.cat([
49 * torch.ones_like(target['object']),
target['object']
])
# Convert HICODet object indices to COCO indices
converted_labels = torch.tensor([int(self.hico2coco91[i.item()]) for i in labels])
# Apply transform
if self.transform is not None:
image, boxes = self.transform(image, boxes)
image = to_tensor(image, input_format='pil')
return [image], [dict(boxes=boxes, labels=converted_labels)]
def collate_fn(batch):
images = []
targets = []
for im, tar in batch:
images += im
targets += tar
return images, targets
"""
Batch sampler that groups images by aspect ratio
https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py
"""
def _repeat_to_at_least(iterable, n):
repeat_times = math.ceil(n / len(iterable))
repeated = chain.from_iterable(repeat(iterable, repeat_times))
return list(repeated)
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that the batch only contain elements from the same group.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
group_ids (list[int]): If the sampler produces indices in range [0, N),
`group_ids` must be a list of `N` ints which contains the group id of each sample.
The group ids must be a continuous set of integers starting from
0, i.e. they must be in the range [0, num_groups).
batch_size (int): Size of mini-batch.
"""
def __init__(self, sampler, group_ids, batch_size):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = group_ids
self.batch_size = batch_size
def __iter__(self):
buffer_per_group = defaultdict(list)
samples_per_group = defaultdict(list)
num_batches = 0
for idx in self.sampler:
group_id = self.group_ids[idx]
buffer_per_group[group_id].append(idx)
samples_per_group[group_id].append(idx)
if len(buffer_per_group[group_id]) == self.batch_size:
yield buffer_per_group[group_id]
num_batches += 1
del buffer_per_group[group_id]
assert len(buffer_per_group[group_id]) < self.batch_size
# now we have run out of elements that satisfy
# the group criteria, let's return the remaining
# elements so that the size of the sampler is
# deterministic
expected_num_batches = len(self)
num_remaining = expected_num_batches - num_batches
if num_remaining > 0:
# for the remaining batches, take first the buffers with largest number
# of elements
for group_id, _ in sorted(buffer_per_group.items(),
key=lambda x: len(x[1]), reverse=True):
remaining = self.batch_size - len(buffer_per_group[group_id])
samples_from_group_id = _repeat_to_at_least(samples_per_group[group_id], remaining)
buffer_per_group[group_id].extend(samples_from_group_id[:remaining])
assert len(buffer_per_group[group_id]) == self.batch_size
yield buffer_per_group[group_id]
num_remaining -= 1
if num_remaining == 0:
break
assert num_remaining == 0
def __len__(self):
return len(self.sampler) // self.batch_size
def _quantize(x, bins):
bins = copy.deepcopy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def create_aspect_ratio_groups(aspect_ratios, k=0):
bins = (2 ** np.linspace(-1, 1, 2 * k + 1)).tolist() if k > 0 else [1.0]
groups = _quantize(aspect_ratios, bins)
# count number of elements per group
counts = np.unique(groups, return_counts=True)[1]
fbins = [0] + bins + [np.inf]
print("Using {} as bins for aspect ratio quantization".format(fbins))
print("Count of instances per bin: {}".format(counts))
return groups
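# Wiring sketch (mirrors main() below): grouping images by quantized aspect ratio keeps
# similarly shaped images in the same mini-batch, which reduces padding overhead.
#   sampler = torch.utils.data.RandomSampler(dataset)
#   group_ids = create_aspect_ratio_groups(aspect_ratios, k=3)
#   batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
#   loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=collate_fn)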
def main(args):
torch.cuda.set_device(0)
torch.manual_seed(args.random_seed)
train2015 = HICODetObject(HICODet(
root=os.path.join(args.data_root, "hico_20160224_det/images/train2015"),
anno_file=os.path.join(args.data_root, "instances_train2015.json"),
target_transform=pocket.ops.ToTensor(input_format='dict')
), data_root=args.data_root, random_flip=True)
test2015 = HICODetObject(HICODet(
root=os.path.join(args.data_root, "hico_20160224_det/images/test2015"),
anno_file=os.path.join(args.data_root, "instances_test2015.json"),
target_transform=pocket.ops.ToTensor(input_format='dict')
), data_root=args.data_root)
def div(a, b):
return a / b
use_train2015 = 'train2015' in args.training_data
use_test2015 = 'test2015' in args.training_data
if len(args.training_data) == 1 and use_train2015:
trainset = train2015
aspect_ratios = [div(*train2015.dataset.image_size(i)) for i in range(len(train2015))]
elif len(args.training_data) == 1 and use_test2015:
trainset = test2015
aspect_ratios = [div(*test2015.dataset.image_size(i)) for i in range(len(test2015))]
    elif len(args.training_data) == 2 and use_train2015 and use_test2015:
trainset = DatasetConcat(train2015, test2015)
aspect_ratios = [
div(*train2015.dataset.image_size(i)) for i in range(len(train2015))
] + [div(*test2015.dataset.image_size(i)) for i in range(len(test2015))]
else:
raise ValueError("Unknown dataset partition in ", args.training_data)
sampler = torch.utils.data.RandomSampler(trainset)
group_ids = create_aspect_ratio_groups(aspect_ratios, k=args.aspect_ratio_group_factor)
batch_sampler = GroupedBatchSampler(sampler, group_ids, args.batch_size)
train_loader = DataLoader(
dataset=trainset, batch_sampler=batch_sampler,
num_workers=4, collate_fn=collate_fn,
)
val_loader = DataLoader(
dataset=test2015, batch_size=1, shuffle=False,
num_workers=4, collate_fn=collate_fn
)
net = pocket.models.fasterrcnn_resnet_fpn('resnet50', pretrained=True)
net.cuda()
engine = DetectorEngine(
net, train_loader, val_loader,
print_interval=args.print_interval,
cache_dir=args.cache_dir,
optim_params=dict(
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay
),
lr_scheduler=True,
lr_sched_params=dict(
milestones=args.milestones,
gamma=args.lr_decay
)
)
engine(args.num_epochs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Fine-tune Faster R-CNN on HICO-DET")
parser.add_argument('--data-root', type=str, default='../')
parser.add_argument('--training-data', nargs='+', default=['train2015',], type=str)
parser.add_argument('--num-epochs', default=15, type=int)
parser.add_argument('--random-seed', default=1, type=int)
parser.add_argument('--learning-rate', default=0.00025, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--batch-size', default=2, type=int)
parser.add_argument('--milestones', nargs='+', default=[8, 12], type=int)
parser.add_argument('--lr-decay', default=0.1, type=float)
parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
parser.add_argument('--print-interval', default=2000, type=int)
parser.add_argument('--cache-dir', type=str, default='./checkpoints')
args = parser.parse_args()
print(args)
main(args)
|
the-stack_106_28153 | __all__ = ['call_echo', 'call_mkdir', 'call_rmdir', 'call_ls']
from pathlib import Path
import shutil
from ..namedregistry import export
from .baseops import subprocess_run
@export(name='echo')
def call_echo(value, verbose=False):
result = subprocess_run(['echo', 'hello', value])
if verbose:
print(result)
return result
@export(name='mkdir')
def call_mkdir(path, verbose=False):
path = Path(path)
return path.mkdir(parents=True, exist_ok=True)
@export(name='rmdir')
def call_rmdir(path, force=False, verbose=False):
return shutil.rmtree(path, True, None)
@export(name='ls')
def call_ls(path, verbose=False):
path = Path(path)
result = list(path.iterdir())
print(result)
return result
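# Usage sketch (illustrative only; the same callables can also be resolved through the
# named registry populated by @export, whose lookup API is not shown in this module):
#   call_mkdir("/tmp/demo")
#   call_ls("/tmp/demo")
#   call_rmdir("/tmp/demo")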
|
the-stack_106_28154 | # coding=utf-8
from __future__ import division
import logging
from itertools import permutations
from os.path import join
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from joblib import Memory
from koino.plot.base import hist, matshow_colorbar
from .stability import ClusteringAnalysis
from scipy.sparse import csr_matrix, issparse, spmatrix
from sklearn import cluster
from sklearn.base import ClusterMixin
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import SpectralCoclustering
from sklearn.neighbors import kneighbors_graph
from .hierarchical import HierarchicalClustering
from ..plot.clusters import plot_cluster_assignments
clustering_algorithms = [
"KMeans",
"AffinityPropagation",
"MeanShift",
"SpectralClustering",
"Ward",
"AgglomerativeClustering",
"AgglomerativeCosine",
"DBSCAN",
"Birch",
]
logger = logging.getLogger(__name__)
random_state_typ = Optional[Union[int, np.random.RandomState]]
def default_kmeans(
n_clusters: int,
n_samples: Optional[int] = None,
verbose: int = 0,
random_state: random_state_typ = None,
) -> KMeans:
"""Sensible defaults for clustering that is a tiny bit more robust.
Full-batch KMeans is unbearably slow for large datasets.
Can init more than 100 times, how much time do you have?
"""
if n_samples and n_samples > int(1e5):
instance = MiniBatchKMeans(
max_no_improvement=100,
batch_size=1000,
verbose=verbose,
max_iter=400,
n_init=100,
n_clusters=n_clusters,
random_state=random_state,
)
else:
instance = KMeans(
n_clusters=n_clusters,
n_jobs=-1,
max_iter=400,
n_init=100,
random_state=random_state,
precompute_distances=True,
verbose=verbose,
)
return instance
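# Illustrative use of the size heuristic above: passing n_samples lets the helper pick
# MiniBatchKMeans for large datasets (> 1e5 samples) and full-batch KMeans otherwise.
#   km = default_kmeans(n_clusters=8, n_samples=500000, random_state=0)  # MiniBatchKMeans
#   km = default_kmeans(n_clusters=8, n_samples=10000, random_state=0)   # KMeans
#   labels = km.fit_predict(X)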
def clustering(
X: Union[pd.DataFrame, np.ndarray],
algorithm: str,
n_clusters: int = 10,
verbose: int = 0,
random_state: random_state_typ = None,
) -> np.ndarray:
"""Compute cluster assignments for given array and clustering algorithm."""
model = None
n_samples = X.shape[0]
# Choose algo
if algorithm == "KMeans":
model = default_kmeans(n_clusters, n_samples=n_samples)
elif algorithm == "Birch":
model = cluster.Birch(n_clusters=n_clusters)
elif algorithm == "DBSCAN":
model = cluster.DBSCAN(n_jobs=-1)
elif algorithm == "AffinityPropagation":
model = cluster.AffinityPropagation(verbose=verbose)
elif algorithm == "MeanShift":
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(
X, quantile=0.3, random_state=random_state, n_jobs=-1
)
logger.debug("[MeanShift] Bandwith={}".format(bandwidth))
model = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True, n_jobs=-1)
elif algorithm == "SpectralClustering":
model = cluster.SpectralClustering(
n_clusters=n_clusters,
eigen_solver="arpack",
affinity="nearest_neighbors",
n_init=100,
n_jobs=-1,
)
elif algorithm in ("Ward", "AgglomerativeClustering", "AgglomerativeCosine"):
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=10, include_self=False, n_jobs=-1
)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
if algorithm == "Ward":
model = cluster.AgglomerativeClustering(
n_clusters=n_clusters, linkage="ward", connectivity=connectivity
)
elif algorithm == "AgglomerativeClustering":
model = cluster.AgglomerativeClustering(
linkage="average",
affinity="euclidean",
n_clusters=n_clusters,
connectivity=connectivity,
)
elif algorithm == "AgglomerativeCosine":
model = cluster.AgglomerativeClustering(
n_clusters=n_clusters, affinity="cosine", linkage="average"
)
model.fit(X)
if hasattr(model, "labels_"):
y_pred = model.labels_.astype(np.int)
else:
y_pred = model.predict(X)
return y_pred
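# Usage sketch: X is any [n_samples, n_features] array and `algorithm` must be one of
# the names listed in `clustering_algorithms` above.
#   y_pred = clustering(X, "KMeans", n_clusters=10)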
def compare_clustering(
X: np.ndarray,
X_tsne: np.ndarray,
n_clusters: int,
figures_dir: str,
verbose: int,
transparent: int,
) -> Dict[str, np.ndarray]:
labels = {}
for algo in clustering_algorithms:
logger.info("[Clustering] Algo: {}".format(algo))
assignments = clustering(X, algo, n_clusters, verbose)
plot_cluster_assignments(
X_tsne,
assignments,
n_clusters,
figures_dir,
transparent=transparent,
title=algo,
)
labels[algo] = assignments
return labels
def init_coclustering(
rows: np.ndarray,
cols: np.ndarray,
output_dir: Optional[str] = None,
row_label: Optional[str] = "Socio-demo",
col_label: Optional[str] = "Diet",
transparent: bool = False,
) -> csr_matrix:
dat = np.ones_like(rows, dtype=np.float32)
X_sp = csr_matrix((dat, (rows, cols)), dtype=np.float32)
if output_dir:
hist_path = join(output_dir, "Co-clustering assignments histogram")
hist(X_sp.data, hist_path, xlabel=row_label, ylabel=col_label)
cooc_path = join(output_dir, "Cooc values histogram")
cooc_title = "Co-clustering original matrix"
matshow_colorbar(
X_sp.A,
cooc_path,
cooc_title,
xlabel=row_label,
ylabel=col_label,
transparent=transparent,
)
return X_sp
def spectral_coclustering(
X: Union[np.ndarray, spmatrix],
n_clusters: int,
output_dir: Optional[str] = None,
row_label: Optional[str] = "Socio-demo",
col_label: Optional[str] = "Diet",
transparent=False,
random_state: random_state_typ = None,
) -> Union[np.ndarray, spmatrix]:
"""Run spectral co-clustering on a sparse or dense matrix and
visualize the result.
Parameters
----------
X: numpy array, scipy sparse matrix of shape [n, m]
n_clusters: int
output_dir: str, path
row_label: str
col_label: str
transparent: bool
random_state: int, np.random.RandomState or None
Returns
-------
X_perm: numpy array, scipy sparse matrix of shape [n, m]
"""
model = SpectralCoclustering(
n_clusters=n_clusters, random_state=random_state, n_jobs=-1
)
model.fit(X)
X_perm = X[np.argsort(model.row_labels_)]
X_perm = X_perm[:, np.argsort(model.column_labels_)]
fit_data_dense = X_perm.A if issparse(X_perm) else X_perm
if output_dir:
figpath = join(output_dir, "spectral_coclst_{}.png".format(n_clusters))
matshow_colorbar(
fit_data_dense,
figpath,
"Rearranged clusters",
xlabel=row_label,
ylabel=col_label,
transparent=transparent,
)
# XXX: This just takes ages, do not use blindly
# score = consensus_score(model.biclusters_, X.nonzero())
# logger.info('[Coclustering] Consensus score={}'.format(score))
return X_perm
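# Usage sketch (variable names are illustrative): the input is typically the sparse
# co-occurrence matrix built by init_coclustering above; rows and columns of the
# returned matrix are permuted so that the discovered biclusters form contiguous blocks.
#   X_sp = init_coclustering(row_assignments, col_assignments)
#   X_perm = spectral_coclustering(X_sp, n_clusters=5, output_dir="figures")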
def vector_quantization(
X: np.ndarray,
n_clusters: Optional[int] = None,
clusters_span: Optional[Union[List[int], Tuple[int]]] = None,
hac_dist: Optional[float] = None,
**kwargs
) -> Tuple[np.ndarray, np.ndarray, int, ClusterMixin]:
"""Solve a vector quantization problem and optionally visualize
clustering assignments.
Parameters
----------
X: numpy array of shape [n_samples, n_features]
n_clusters: int (optional)
clusters_span: range (optional)
hac_dist:
Returns
-------
assignments: numpy array of shape [n_samples,]
centroids: numpy array of shape [n_clusters, n_features]
n_clusters: int
clst: object
"""
if clusters_span:
# Check that clusters' span is correct
if isinstance(clusters_span, (list, tuple)):
clusters_span = range(*clusters_span)
assert clusters_span.start >= 2, clusters_span.start
assert len(clusters_span) >= 2, clusters_span
if n_clusters:
assert clusters_span.start <= n_clusters <= clusters_span.stop
logger.info("[VQ] Clusters spanning {}".format(clusters_span))
# Find optimal number of clusters and predict labels
clst = ClusteringAnalysis(
clustering_model=default_kmeans, clusters_span=clusters_span, **kwargs
)
elif n_clusters:
logger.info("[VQ] Quantizing with {} centroids".format(n_clusters))
clst = default_kmeans(n_clusters)
elif hac_dist:
logger.info("[HAC] Quantizing at distance {}".format(hac_dist))
clst = HierarchicalClustering(hac_dist)
else:
raise ValueError("No. clusters, clusters span or HAC dist expectd")
assignments = clst.fit_predict(X)
centroids = clst.cluster_centers_
n_clusters = clst.n_clusters
return assignments, centroids, n_clusters, clst
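# Usage sketch: at least one of n_clusters, clusters_span or hac_dist must be given;
# clusters_span takes precedence and triggers a model-selection run over that range.
#   assignments, centroids, k, model = vector_quantization(X, n_clusters=10)
#   assignments, centroids, k, model = vector_quantization(X, clusters_span=(2, 15))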
def run_hdbscan(X_df, X_tsne, output_dir, transparent):
"""Cluster using density estimation
Parameters
----------
X_df: DataFrame
X_tsne: array-like, [n_samples, 2]
output_dir: str, path
transparent: bool
Returns
-------
clusterer: HDBSCAN object
assignments: numpy array of shape [n_samples,]
"""
from hdbscan import HDBSCAN
clusterer = HDBSCAN(
core_dist_n_jobs=-1,
cluster_selection_method="eom", # 'leaf',
approx_min_span_tree=False,
min_cluster_size=100,
min_samples=1,
leaf_size=100,
gen_min_span_tree=True,
# alpha=10.,
memory=Memory(cachedir=None, verbose=0),
)
assignments = clusterer.fit_predict(X_df)
centroid_labels, counts = np.unique(assignments, return_counts=True)
n_clusters = len(centroid_labels)
assignments[assignments == -1] = n_clusters - 1
logger.info("[HDBSCAN] Found {} clusters".format(n_clusters))
logger.info("[HDBSCAN] Cluster assignments:\n{}".format(counts))
logger.info(
"[HDBSCAN] Cluster persistence:\n{}".format(clusterer.cluster_persistence_)
)
return assignments, clusterer.exemplars_, n_clusters, clusterer
def visualize_hdbscan(
clusterer, X_projected, assignments, n_clusters, output_dir, transparent
):
"""Visualize HDBSCAN results
Parameters
----------
clusterer: object
X_projected: array - like, [n_samples, 2]
assignments
n_clusters
output_dir: str, path
transparent
"""
probas_fp = join(output_dir, "HDBSCAN_sample_probas.png")
outliers_fp = join(output_dir, "HDBSCAN_outliers.png")
hist(clusterer.probabilities_, probas_fp)
hist(clusterer.outlier_scores_, outliers_fp)
plot_cluster_assignments(
X_projected,
assignments,
"HDBSCAN assignments",
n_clusters,
output_dir,
transparent,
)
def meila_distance(clustering1, clustering2, num_clusters):
n_samples = len(clustering1)
clustering_1 = np.zeros((n_samples, num_clusters))
clustering_2 = np.zeros((n_samples, num_clusters))
for x in range(0, n_samples):
clustering_1[x, clustering1[x]] += 1
clustering_2[x, clustering2[x]] += 1
confusion_matrix = np.dot(np.transpose(clustering_1), clustering_2)
max_confusion = 0
for perm in permutations(range(0, num_clusters)):
confusion = 0
for i in range(0, num_clusters):
confusion += confusion_matrix[i, perm[i]]
if max_confusion < confusion:
max_confusion = confusion
distance = 1 - (max_confusion / n_samples)
return distance
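# Note: the confusion-matrix maximisation above enumerates all k! label permutations,
# so this distance is only practical for a small number of clusters.
# Illustrative check (labels differing only by a permutation have distance 0):
#   meila_distance([0, 0, 1, 1], [1, 1, 0, 0], 2)  # -> 0.0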
|
the-stack_106_28155 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
if not (sys.version_info.major == 3 and sys.version_info.minor > 5):
print("Python version %s.%s not supported version 3.6 or above required - exiting" % (sys.version_info.major,sys.version_info.minor))
sys.exit(1)
import os
import io
for path in [os.getcwd(),"Util","SchemaPages","SchemaExamples"]:
sys.path.insert( 1, path ) #Pickup libs from local directories
from buildsite import *
from sdotermsource import SdoTermSource
from sdoterm import *
from localmarkdown import Markdown
VOCABURI = SdoTermSource.vocabUri()
###################################################
#MARKDOWN INITIALISE
###################################################
Markdown.setWikilinkCssClass("localLink")
Markdown.setWikilinkPrePath("https://schema.org/")
#Production site uses no suffix in link - mapping to file done in server config
Markdown.setWikilinkPostPath("")
def fileName(fn):
ret = OUTPUTDIR + '/' + fn
checkFilePath(os.path.dirname(ret))
return ret
CACHECONTEXT = None
def jsonldcontext(page):
global CACHECONTEXT
from sdojsonldcontext import createcontext
if not CACHECONTEXT:
CACHECONTEXT = createcontext()
return CACHECONTEXT
import json
def jsonldtree(page):
global VISITLIST
VISITLIST=[]
term = {}
context = {}
context['rdfs'] = "http://www.w3.org/2000/01/rdf-schema#"
context['schema'] = "https://schema.org"
context['rdfs:subClassOf'] = { "@type": "@id" }
context['description'] = "rdfs:comment"
context['children'] = { "@reverse": "rdfs:subClassOf" }
term['@context'] = context
data = _jsonldtree("Thing",term)
return json.dumps(data,indent=3)
def _jsonldtree(tid,term=None):
termdesc = SdoTermSource.getTerm(tid)
if not term:
term = {}
term['@type'] = "rdfs:Class"
term['@id'] = "schema:" + termdesc.id
term['name'] = termdesc.label
if termdesc.supers:
sups = []
for sup in termdesc.supers:
sups.append("schema:" + sup)
if len(sups) == 1:
term['rdfs:subClassOf'] = sups[0]
else:
term['rdfs:subClassOf'] = sups
term['description'] = ShortenOnSentence(StripHtmlTags(termdesc.comment))
if termdesc.pending:
term['pending'] = True
if termdesc.retired:
term['attic'] = True
if tid not in VISITLIST:
VISITLIST.append(tid)
if termdesc.subs:
subs = []
for sub in termdesc.subs:
subs.append(_jsonldtree(sub))
term['children'] = subs
return term
def owl(page):
from sdoowl import OwlBuild
return OwlBuild().getContent()
def sitemap(page):
node = """ <url>
<loc>https://schema.org/%s</loc>
<lastmod>%s</lastmod>
</url>
"""
STATICPAGES = ["docs/schemas.html",
"docs/full.html",
"docs/gs.html",
"docs/about.html",
"docs/howwework.html",
"docs/releases.html",
"docs/faq.html",
"docs/datamodel.html",
"docs/developers.html",
"docs/extension.html",
"docs/meddocs.html",
"docs/hotels.html"]
output = []
output.append("""<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
""")
terms = SdoTermSource.getAllTerms(supressSourceLinks=True)
ver = getVersionDate(getVersion())
for term in terms:
if not (term.startswith("http://") or term.startswith("https://")):
output.append(node % (term,ver))
for term in STATICPAGES:
output.append(node % (term,ver))
output.append("</urlset>\n")
return "".join(output)
def prtocolswap(content,protocol,altprotocol):
ret = content.replace("%s://schema.org" % protocol,"%s://schema.org" % altprotocol)
for ext in ["attic","auto","bib","health-lifesci","meta","pending"]:
ret = ret.replace("%s://%s.schema.org" % (protocol,ext),"%s://%s.schema.org" % (altprotocol,ext))
return ret
def protocols():
protocol="http"
altprotocol="https"
if VOCABURI.startswith("https"):
protocol="https"
altprotocol="http"
return protocol,altprotocol
import rdflib
from rdflib.serializer import Serializer
import rdflib_jsonld
rdflib.plugin.register("json-ld", Serializer, "rdflib_jsonld.serializer", "JsonLDSerializer")
allGraph = None
currentGraph = None
def exportrdf(exportType):
global allGraph, currentGraph
if not allGraph:
allGraph = rdflib.Graph()
currentGraph = rdflib.Graph()
allGraph += SdoTermSource.sourceGraph()
deloddtriples = """DELETE {?s ?p ?o}
WHERE {
?s ?p ?o.
         FILTER (! (strstarts(str(?s), "http://schema.org") || strstarts(str(?s), "https://schema.org") )).
}"""
allGraph.update(deloddtriples)
currentGraph += allGraph
protocol, altprotocol = protocols()
delattic="""PREFIX schema: <%s://schema.org/>
DELETE {?s ?p ?o}
WHERE{
?s ?p ?o;
schema:isPartOf <%s://attic.schema.org>.
}""" % (protocol,protocol)
currentGraph.update(delattic)
formats = ["json-ld", "turtle", "nt", "nquads", "rdf"]
extype = exportType[len("RDFExport."):]
if exportType == "RDFExports":
for format in sorted(formats):
_exportrdf(format,allGraph,currentGraph)
elif extype in formats:
_exportrdf(extype,allGraph,currentGraph)
else:
raise Exception("Unknown export format: %s" % exportType)
completed = []
def _exportrdf(format,all,current):
global completed
exts = {"xml":".xml","rdf":".rdf","nquads":".nq","nt": ".nt","json-ld": ".jsonld", "turtle":".ttl"}
protocol, altprotocol = protocols()
if format in completed:
return
else:
completed.append(format)
for ver in ["current","all"]:
if ver == "all":
g = all
else:
g = current
if format == "nquads":
gr = rdflib.Dataset()
            qg = gr.graph(rdflib.URIRef("%s://schema.org/%s" % (protocol,getVersion())))
qg += g
g = gr
fn = fileName("releases/%s/schemaorg-%s-%s%s" % (getVersion(),ver,protocol,exts[format]))
afn = fileName("releases/%s/schemaorg-%s-%s%s" % (getVersion(),ver,altprotocol,exts[format]))
fmt = format
if format == "rdf":
fmt = "pretty-xml"
f = open(fn,"w")
af = open(afn,"w")
kwargs = {'sort_keys': True}
out = g.serialize(format=fmt,auto_compact=True,**kwargs).decode()
f.write(out)
print(fn)
af.write(prtocolswap(out,protocol=protocol,altprotocol=altprotocol))
print(afn)
f.close()
af.close()
def array2str(ar):
if not ar or not len(ar):
return ""
buf = []
first = True
for i in ar:
if first:
first = False
else:
buf.append(', ')
buf.append(i)
return "".join(buf)
def uriwrap(ids):
single = False
if not isinstance(ids, list):
single = True
ids = [ids]
ret = []
for i in ids:
if i and len(i):
ret.append(VOCABURI + i)
else:
ret.append("")
if single:
return ret[0]
if not len(ret):
return ""
return array2str(ret)
def exportcsv(page):
protocol, altprotocol = protocols()
typeFields = ["id","label","comment","subTypeOf","enumerationtype","equivalentClass","properties","subTypes","supersedes","supersededBy","isPartOf"]
propFields = ["id","label","comment","subPropertyOf","equivalentProperty","subproperties","domainIncludes","rangeIncludes","inverseOf","supersedes","supersededBy","isPartOf"]
typedata = []
typedataAll = []
propdata = []
propdataAll = []
terms = SdoTermSource.getAllTerms(expanded=True,supressSourceLinks=True)
for term in terms:
if term.termType == SdoTerm.REFERENCE or term.id.startswith("http://") or term.id.startswith("https://"):
continue
row = {}
row["id"] = term.uri
row["label"] = term.label
row["comment"] = term.comment
row["supersedes"] = uriwrap(term.supersedes)
row["supersededBy"] = uriwrap(term.supersededBy)
#row["isPartOf"] = term.isPartOf
row["isPartOf"] = ""
if term.termType == SdoTerm.PROPERTY:
row["subPropertyOf"] = uriwrap(term.supers)
row["equivalentProperty"] = array2str(term.equivalents)
row["subproperties"] = uriwrap(term.subs)
row["domainIncludes"] = uriwrap(term.domainIncludes)
row["rangeIncludes"] = uriwrap(term.rangeIncludes)
row["inverseOf"] = uriwrap(term.inverse)
propdataAll.append(row)
if not term.retired:
propdata.append(row)
else:
row["subTypeOf"] = uriwrap(term.supers)
if term.termType == SdoTerm.ENUMERATIONVALUE:
row["enumerationtype"] = uriwrap(term.enumerationParent)
else:
row["properties"] = uriwrap(term.allproperties)
row["equivalentClass"] = array2str(term.equivalents)
row["subTypes"] = uriwrap(term.subs)
typedataAll.append(row)
if not term.retired:
typedata.append(row)
writecsvout(propdata,propFields,"current",protocol,altprotocol)
writecsvout(propdataAll,propFields,"all",protocol,altprotocol)
writecsvout(typedata,typeFields,"current",protocol,altprotocol)
writecsvout(typedataAll,typeFields,"all",protocol,altprotocol)
def writecsvout(ftype,data,fields,ver,protocol,altprotocol):
    import csv
    fn = fileName("releases/%s/schemaorg-%s-%s-%s.csv" % (getVersion(),ver,protocol,ftype))
    afn = fileName("releases/%s/schemaorg-%s-%s-%s.csv" % (getVersion(),ver,altprotocol,ftype))
csvout = io.StringIO()
csvfile = open(fn,'w')
acsvfile = open(afn,'w')
writer = csv.DictWriter(csvout, fieldnames=fields)
writer.writeheader()
for row in data:
writer.writerow(row)
csvfile.write(csvout.getvalue())
print(fn)
csvfile.close()
acsvfile.write(prtocolswap(csvout.getvalue(),protocol=protocol,altprotocol=altprotocol))
print(afn)
acsvfile.close()
csvout.close()
def examples(page):
return SchemaExamples.allExamplesSerialised()
FILELIST = { "Context": (jsonldcontext,["docs/jsonldcontext.jsonld",
"docs/jsonldcontext.json","docs/jsonldcontext.json.txt",
"releases/%s/schemaorgcontext.jsonld" % getVersion()]),
"Tree": (jsonldtree,["docs/tree.jsonld"]),
"Owl": (owl,["docs/schemaorg.owl","releases/%s/schemaorg.owl" % getVersion()]),
"Sitemap": (sitemap,["docs/sitemap.xml"]),
"RDFExports": (exportrdf,[""]),
"RDFExport.turtle": (exportrdf,[""]),
"RDFExport.rdf": (exportrdf,[""]),
"RDFExport.nt": (exportrdf,[""]),
"RDFExport.nquads": (exportrdf,[""]),
"RDFExport.json-ld": (exportrdf,[""]),
"CSVExports": (exportcsv,[""]),
"Examples": (examples,["releases/%s/all_examples.txt" % getVersion()])
}
def buildFiles(files):
all = ["ALL","All","all"]
for a in all:
if a in files:
files = sorted(FILELIST.keys())
break
for p in files:
print("%s:"%p)
if p in FILELIST.keys():
func, filenames = FILELIST.get(p,None)
if func:
content = func(p)
if content:
for filename in filenames:
fn = fileName(filename)
f = open(fn,"w")
f.write(content)
f.close()
print("Created %s" % fn)
else:
print("Unknown files name: %s" % p)
|
the-stack_106_28160 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTqdm(PythonPackage):
"""A Fast, Extensible Progress Meter"""
homepage = "https://github.com/tqdm/tqdm"
url = "https://pypi.io/packages/source/t/tqdm/tqdm-4.45.0.tar.gz"
version('4.59.0', sha256='d666ae29164da3e517fcf125e41d4fe96e5bb375cd87ff9763f6b38b5592fe33')
version('4.45.0', sha256='00339634a22c10a7a22476ee946bbde2dbe48d042ded784e4d88e0236eca5d81')
version('4.36.1', sha256='abc25d0ce2397d070ef07d8c7e706aede7920da163c64997585d42d3537ece3d')
version('4.8.4', sha256='bab05f8bb6efd2702ab6c532e5e6a758a66c0d2f443e09784b73e4066e6b3a37')
variant('telegram', default=False, description='Enable Telegram bot support')
variant('notebook', default=False, description='Enable Jupyter Notebook support')
    depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools@42:', type=('build', 'run'))
    depends_on('py-setuptools-scm@3.4:+toml', type='build')
depends_on('py-requests', when='+telegram', type=('build', 'run'))
depends_on('py-ipywidgets@6:', when='+notebook', type=('build', 'run'))
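    # Installation sketch (hypothetical spec; the variants map to the extras declared above):
    #   spack install py-tqdm@4.59.0 +notebook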
|
the-stack_106_28162 | # -*- coding: utf-8 -*-
import logging
import math
import os
import time
import datetime
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any
import torch
import torch.optim.lr_scheduler
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError, parse_cuda_device
from allennlp.common.util import (dump_metrics, gpu_memory_mb, peak_memory_mb,
lazy_groups_of)
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator, TensorDict
from allennlp.models.model import Model
from allennlp.nn import util as nn_util
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.metric_tracker import MetricTracker
from allennlp.training.optimizers import Optimizer
from allennlp.training.tensorboard_writer import TensorboardWriter
from allennlp.training.trainer_base import TrainerBase
from allennlp.training import util as training_util
from allennlp.training.moving_average import MovingAverage
from acsa.acsc_pytorch import allennlp_callback
from acsa.acsc_pytorch.acsc_models import Estimator
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@TrainerBase.register("MyTrainer")
class Trainer(TrainerBase):
"""
    1. Support evaluating on the test set after every epoch (todo)
    2. Support combining losses from multiple tasks (todo)
"""
def __init__(self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
shuffle: bool = True,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
checkpointer: Checkpointer = None,
model_save_interval: float = None,
cuda_device: Union[int, List] = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
momentum_scheduler: Optional[MomentumScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None,
should_log_parameter_statistics: bool = True,
should_log_learning_rate: bool = False,
log_batch_size_period: Optional[int] = None,
moving_average: Optional[MovingAverage] = None,
callbacks: List[allennlp_callback.Callback]=None,
early_stopping_by_batch: bool=True,
estimator: Estimator=None,
) -> None:
"""
A trainer for doing supervised learning. It just takes a labeled dataset
and a ``DataIterator``, and uses the supplied ``Optimizer`` to learn the weights
for your model over some fixed number of epochs. You can also pass in a validation
dataset and enable early stopping. There are many other bells and whistles as well.
Parameters
----------
model : ``Model``, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their ``forward`` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
If you are training your model using GPUs, your model should already be
on the correct device. (If you use `Trainer.from_params` this will be
handled for you.)
optimizer : ``torch.optim.Optimizer``, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
iterator : ``DataIterator``, required.
A method for iterating over a ``Dataset``, yielding padded indexed batches.
train_dataset : ``Dataset``, required.
A ``Dataset`` to train on. The dataset should have already been indexed.
validation_dataset : ``Dataset``, optional, (default = None).
A ``Dataset`` to evaluate on. The dataset should have already been indexed.
patience : Optional[int] > 0, optional (default=None)
Number of epochs to be patient before early stopping: the training is stopped
after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
If None, early stopping is disabled.
validation_metric : str, optional (default="loss")
Validation metric to measure for whether to stop training using patience
and whether to serialize an ``is_best`` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_iterator : ``DataIterator``, optional (default=None)
An iterator to use for the validation set. If ``None``, then
use the training `iterator`.
shuffle: ``bool``, optional (default=True)
Whether to shuffle the instances in the iterator or not.
num_epochs : int, optional (default = 20)
Number of training epochs.
serialization_dir : str, optional (default=None)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
num_serialized_models_to_keep : ``int``, optional (default=20)
Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
A value of None or -1 means all checkpoints will be kept.
keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
checkpointer : ``Checkpointer``, optional (default=None)
An instance of class Checkpointer to use instead of the default. If a checkpointer is specified,
the arguments num_serialized_models_to_keep and keep_serialized_model_every_num_seconds should
not be specified. The caller is responsible for initializing the checkpointer so that it is
consistent with serialization_dir.
model_save_interval : ``float``, optional (default=None)
If provided, then serialize models every ``model_save_interval``
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if ``serialization_dir`` is provided.
cuda_device : ``Union[int, List[int]]``, optional (default = -1)
An integer or list of integers specifying the CUDA device(s) to use. If -1, the CPU is used.
grad_norm : ``float``, optional, (default = None).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : ``float``, optional (default = ``None``).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting ``NaNs`` in your gradients during training
that are not solved by using ``grad_norm``, you may need this.
learning_rate_scheduler : ``LearningRateScheduler``, optional (default = None)
If specified, the learning rate will be decayed with respect to
this schedule at the end of each epoch (or batch, if the scheduler implements
the ``step_batch`` method). If you use :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`,
this will use the ``validation_metric`` provided to determine if learning has plateaued.
To support updating the learning rate on every batch, this can optionally implement
``step_batch(batch_num_total)`` which updates the learning rate given the batch number.
momentum_scheduler : ``MomentumScheduler``, optional (default = None)
If specified, the momentum will be updated at the end of each batch or epoch
according to the schedule.
summary_interval: ``int``, optional, (default = 100)
Number of batches between logging scalars to tensorboard
histogram_interval : ``int``, optional, (default = ``None``)
If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
``model.get_parameters_for_histogram_tensorboard_logging``.
The layer activations are logged for any modules in the ``Model`` that have
the attribute ``should_log_activations`` set to ``True``. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
should_log_parameter_statistics : ``bool``, optional, (default = True)
Whether to send parameter statistics (mean and standard deviation
of parameters and gradients) to tensorboard.
should_log_learning_rate : ``bool``, optional, (default = False)
Whether to send parameter specific learning rate to tensorboard.
log_batch_size_period : ``int``, optional, (default = ``None``)
If defined, how often to log the average batch size.
moving_average: ``MovingAverage``, optional, (default = None)
If provided, we will maintain moving averages for all parameters. During training, we
employ a shadow variable for each parameter, which maintains the moving average. During
evaluation, we backup the original parameters and assign the moving averages to corresponding
parameters. Be careful that when saving the checkpoint, we will save the moving averages of
parameters. This is necessary because we want the saved model to perform as well as the validated
model if we load it later. But this may cause problems if you restart the training from checkpoint.
"""
super().__init__(serialization_dir, cuda_device)
# I am not calling move_to_gpu here, because if the model is
# not already on the GPU then the optimizer is going to be wrong.
self.model = model
self.iterator = iterator
self._validation_iterator = validation_iterator
self.shuffle = shuffle
self.optimizer = optimizer
self.train_data = train_dataset
self._validation_data = validation_dataset
if patience is None: # no early stopping
if validation_dataset:
logger.warning('You provided a validation dataset but patience was set to None, '
'meaning that early stopping is disabled')
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError('{} is an invalid value for "patience": it must be a positive integer '
'or None (if you want to disable early stopping)'.format(patience))
# For tracking is_best_so_far and should_stop_early
self._metric_tracker = MetricTracker(patience, validation_metric)
# Get rid of + or -
self._validation_metric = validation_metric[1:]
self._num_epochs = num_epochs
if checkpointer is not None:
# We can't easily check if these parameters were passed in, so check against their default values.
# We don't check against serialization_dir since it is also used by the parent class.
if num_serialized_models_to_keep != 20 or \
keep_serialized_model_every_num_seconds is not None:
raise ConfigurationError(
"When passing a custom Checkpointer, you may not also pass in separate checkpointer "
"args 'num_serialized_models_to_keep' or 'keep_serialized_model_every_num_seconds'.")
self._checkpointer = checkpointer
else:
self._checkpointer = Checkpointer(serialization_dir,
keep_serialized_model_every_num_seconds,
num_serialized_models_to_keep)
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
self._momentum_scheduler = momentum_scheduler
self._moving_average = moving_average
# We keep the total batch number as an instance variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._tensorboard = TensorboardWriter(
get_batch_num_total=lambda: self._batch_num_total,
serialization_dir=serialization_dir,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate)
self._log_batch_size_period = log_batch_size_period
self._last_log = 0.0 # time of last logging
# Enable activation logging.
if histogram_interval is not None:
self._tensorboard.enable_activation_logging(self.model)
self.callbacks = callbacks
self._early_stopping_by_batch = early_stopping_by_batch
self._estimator = estimator
def rescale_gradients(self) -> Optional[float]:
return training_util.rescale_gradients(self.model, self._grad_norm)
def batch_loss(self, batch_group: List[TensorDict], for_training: bool) -> torch.Tensor:
"""
Does a forward pass on the given batches and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = training_util.data_parallel(batch_group, self.model, self._cuda_devices)
else:
assert len(batch_group) == 1
batch = batch_group[0]
batch = nn_util.move_to_device(batch, self._cuda_devices[0])
output_dict = self.model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self.model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError("The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs).")
loss = None
return loss
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
peak_cpu_usage = peak_memory_mb()
logger.info(f"Peak CPU memory usage MB: {peak_cpu_usage}")
gpu_usage = []
for gpu, memory in gpu_memory_mb().items():
gpu_usage.append((gpu, memory))
logger.info(f"GPU {gpu} memory usage MB: {memory}")
train_loss = 0.0
# Set the model to "train" mode.
self.model.train()
num_gpus = len(self._cuda_devices)
# Get tqdm for the training batches
raw_train_generator = self.iterator(self.train_data,
num_epochs=1,
shuffle=self.shuffle)
train_generator = lazy_groups_of(raw_train_generator, num_gpus)
num_training_batches = math.ceil(self.iterator.get_num_batches(self.train_data)/num_gpus)
self._last_log = time.time()
last_save_time = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())
logger.info("Training")
train_generator_tqdm = Tqdm.tqdm(train_generator,
total=num_training_batches)
cumulative_batch_size = 0
for batch_group in train_generator_tqdm:
self.model.train()
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
self.optimizer.zero_grad()
loss = self.batch_loss(batch_group, for_training=True)
if torch.isnan(loss):
raise ValueError("nan loss encountered")
loss.backward()
train_loss += loss.item()
batch_grad_norm = self.rescale_gradients()
# This does nothing if batch_num_total is None or you are using a
# scheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._momentum_scheduler:
self._momentum_scheduler.step_batch(batch_num_total)
if self._tensorboard.should_log_histograms_this_batch():
# get the magnitude of parameter updates for logging
# We need a copy of current parameters to compute magnitude of updates,
# and copy them to CPU so large models won't go OOM on the GPU.
param_updates = {name: param.detach().cpu().clone()
for name, param in self.model.named_parameters()}
self.optimizer.step()
for name, param in self.model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
update_norm = torch.norm(param_updates[name].view(-1, ))
param_norm = torch.norm(param.view(-1, )).cpu()
self._tensorboard.add_train_scalar("gradient_update/" + name,
update_norm / (param_norm + 1e-7))
else:
self.optimizer.step()
# Update moving averages
if self._moving_average is not None:
self._moving_average.apply(batch_num_total)
# Update the description with the latest metrics
metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch)
description = training_util.description_from_metrics(metrics)
train_generator_tqdm.set_description(description, refresh=False)
# Log parameter values to Tensorboard
if self._tensorboard.should_log_this_batch():
self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)
self._tensorboard.log_learning_rates(self.model, self.optimizer)
self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"])
self._tensorboard.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
if self._tensorboard.should_log_histograms_this_batch():
self._tensorboard.log_histograms(self.model, histogram_parameters)
if self._log_batch_size_period:
cur_batch = sum([training_util.get_batch_size(batch) for batch in batch_group])
cumulative_batch_size += cur_batch
if (batches_this_epoch - 1) % self._log_batch_size_period == 0:
average = cumulative_batch_size/batches_this_epoch
logger.info(f"current batch size: {cur_batch} mean batch size: {average}")
self._tensorboard.add_train_scalar("current_batch_size", cur_batch)
self._tensorboard.add_train_scalar("mean_batch_size", average)
# Save model if needed.
if self._model_save_interval is not None and (
time.time() - last_save_time > self._model_save_interval
):
last_save_time = time.time()
self._save_checkpoint(
'{0}.{1}'.format(epoch, training_util.time_to_str(int(last_save_time)))
)
if self._early_stopping_by_batch and self._batch_num_total % 10 == 0:
if self._validation_data is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, num_batches = self._validation_loss()
val_metrics = training_util.get_metrics(self.model, val_loss, num_batches, reset=True)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
self._metric_tracker.add_metric(this_epoch_val_metric)
if self._metric_tracker.is_best_so_far():
metrics['best_batch'] = self._batch_num_total
for key, value in val_metrics.items():
metrics["best_validation_" + key] = value
self._metric_tracker.best_epoch_metrics = val_metrics
self._save_checkpoint(self._batch_num_total)
if self.callbacks is not None:
for callback in self.callbacks:
callback.on_batch_end(self._batch_num_total)
metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch, reset=True)
metrics['cpu_memory_MB'] = peak_cpu_usage
for (gpu_num, memory) in gpu_usage:
metrics['gpu_'+str(gpu_num)+'_memory_MB'] = memory
return metrics
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self.model.eval()
# Replace parameter values with the shadow values from the moving averages.
if self._moving_average is not None:
self._moving_average.assign_average_value()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self.iterator
num_gpus = len(self._cuda_devices)
raw_val_generator = val_iterator(self._validation_data,
num_epochs=1,
shuffle=False)
val_generator = lazy_groups_of(raw_val_generator, num_gpus)
num_validation_batches = math.ceil(val_iterator.get_num_batches(self._validation_data)/num_gpus)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch_group in val_generator_tqdm:
loss = self.batch_loss(batch_group, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = training_util.get_metrics(self.model, val_loss, batches_this_epoch)
description = training_util.description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
# Now restore the original parameter values.
if self._moving_average is not None:
self._moving_average.restore()
return val_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
epoch_counter = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError("Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?")
training_util.enable_gradient_clipping(self.model, self._grad_clipping)
logger.info("Beginning training.")
train_metrics: Dict[str, float] = {}
val_metrics: Dict[str, float] = {}
this_epoch_val_metric: float = None
metrics: Dict[str, Any] = {}
epochs_trained = 0
training_start_time = time.time()
metrics['best_epoch'] = self._metric_tracker.best_epoch
for key, value in self._metric_tracker.best_epoch_metrics.items():
metrics["best_validation_" + key] = value
if self.callbacks is not None:
with torch.no_grad():
for callback in self.callbacks:
callback.on_train_begin()
for epoch in range(epoch_counter, self._num_epochs):
epoch_start_time = time.time()
if self.callbacks is not None:
with torch.no_grad():
for callback in self.callbacks:
callback.on_epoch_begin(epoch)
train_metrics = self._train_epoch(epoch)
if not self._early_stopping_by_batch:
# get peak of memory usage
if 'cpu_memory_MB' in train_metrics:
metrics['peak_cpu_memory_MB'] = max(metrics.get('peak_cpu_memory_MB', 0),
train_metrics['cpu_memory_MB'])
for key, value in train_metrics.items():
if key.startswith('gpu_'):
metrics["peak_"+key] = max(metrics.get("peak_"+key, 0), value)
if self._validation_data is not None:
with torch.no_grad():
val_metrics_temp = self._estimator.estimate(self._validation_data)
# We have a validation set, so compute all the metrics on it.
# val_loss, num_batches = self._validation_loss()
# val_metrics = training_util.get_metrics(self.model, val_loss, num_batches, reset=True)
val_metrics = {'loss': 0}
if 'sentiment_acc' in val_metrics_temp:
val_metrics['accuracy'] = val_metrics_temp['sentiment_acc']
if 'category_f1' in val_metrics_temp:
val_metrics['category_f1'] = val_metrics_temp['category_f1']['fscore']
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
self._metric_tracker.add_metric(this_epoch_val_metric)
if self._metric_tracker.should_stop_early():
logger.info("Ran out of patience. Stopping training.")
break
self._tensorboard.log_metrics(train_metrics,
val_metrics=val_metrics,
log_to_console=True,
epoch=epoch + 1) # +1 because tensorboard doesn't like 0
# Create overall metrics dict
training_elapsed_time = time.time() - training_start_time
metrics["training_duration"] = str(datetime.timedelta(seconds=training_elapsed_time))
metrics["training_start_epoch"] = epoch_counter
metrics["training_epochs"] = epochs_trained
metrics["epoch"] = epoch
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
if self._metric_tracker.is_best_so_far():
# Update all the best_ metrics.
# (Otherwise they just stay the same as they were.)
metrics['best_epoch'] = epoch
for key, value in val_metrics.items():
metrics["best_validation_" + key] = value
self._metric_tracker.best_epoch_metrics = val_metrics
if self._serialization_dir:
dump_metrics(os.path.join(self._serialization_dir, f'metrics_epoch_{epoch}.json'), metrics)
# The Scheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
if self._momentum_scheduler:
self._momentum_scheduler.step(this_epoch_val_metric, epoch)
self._save_checkpoint(epoch)
else:
if self._metric_tracker.should_stop_early():
logger.info("Ran out of patience. Stopping training.")
break
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", datetime.timedelta(seconds=epoch_elapsed_time))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * \
((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
logger.info("Estimated training time remaining: %s", formatted_time)
if self.callbacks is not None:
with torch.no_grad():
for callback in self.callbacks:
callback.on_epoch_end(epoch)
epochs_trained += 1
# make sure pending events are flushed to disk and files are closed properly
self._tensorboard.close()
# Load the best model state before returning
best_model_state = self._checkpointer.best_model_state()
if best_model_state:
self.model.load_state_dict(best_model_state)
return metrics
def _save_checkpoint(self, epoch: Union[int, str]) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
Parameters
----------
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
"""
# If moving averages are used for parameters, we save
# the moving average values into checkpoint, instead of the current values.
if self._moving_average is not None:
self._moving_average.assign_average_value()
# These are the training states we need to persist.
training_states = {
"metric_tracker": self._metric_tracker.state_dict(),
"optimizer": self.optimizer.state_dict(),
"batch_num_total": self._batch_num_total
}
# If we have a learning rate or momentum scheduler, we should persist them too.
if self._learning_rate_scheduler is not None:
training_states["learning_rate_scheduler"] = self._learning_rate_scheduler.state_dict()
if self._momentum_scheduler is not None:
training_states["momentum_scheduler"] = self._momentum_scheduler.state_dict()
self._checkpointer.save_checkpoint(
model_state=self.model.state_dict(),
epoch=epoch,
training_states=training_states,
is_best_so_far=self._metric_tracker.is_best_so_far())
# Restore the original values for parameters so that training will not be affected.
if self._moving_average is not None:
self._moving_average.restore()
def _restore_checkpoint(self) -> int:
"""
Restores the model and training state from the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
Returns
-------
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
model_state, training_state = self._checkpointer.restore_checkpoint()
if not training_state:
# No checkpoint to restore, start at 0
return 0
self.model.load_state_dict(model_state)
self.optimizer.load_state_dict(training_state["optimizer"])
if self._learning_rate_scheduler is not None and "learning_rate_scheduler" in training_state:
self._learning_rate_scheduler.load_state_dict(training_state["learning_rate_scheduler"])
if self._momentum_scheduler is not None and "momentum_scheduler" in training_state:
self._momentum_scheduler.load_state_dict(training_state["momentum_scheduler"])
training_util.move_optimizer_to_cuda(self.optimizer)
# Currently the ``training_state`` contains a serialized ``MetricTracker``.
if "metric_tracker" in training_state:
self._metric_tracker.load_state_dict(training_state["metric_tracker"])
# It used to be the case that we tracked ``val_metric_per_epoch``.
elif "val_metric_per_epoch" in training_state:
self._metric_tracker.clear()
self._metric_tracker.add_metrics(training_state["val_metric_per_epoch"])
# And before that we didn't track anything.
else:
self._metric_tracker.clear()
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get('batch_num_total')
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return
# Requires custom from_params.
@classmethod
def from_params(cls, # type: ignore
model: Model,
serialization_dir: str,
iterator: DataIterator,
train_data: Iterable[Instance],
validation_data: Optional[Iterable[Instance]],
params: Params,
validation_iterator: DataIterator = None) -> 'Trainer':
# pylint: disable=arguments-differ
patience = params.pop_int("patience", None)
validation_metric = params.pop("validation_metric", "-loss")
shuffle = params.pop_bool("shuffle", True)
num_epochs = params.pop_int("num_epochs", 20)
cuda_device = parse_cuda_device(params.pop("cuda_device", -1))
grad_norm = params.pop_float("grad_norm", None)
grad_clipping = params.pop_float("grad_clipping", None)
lr_scheduler_params = params.pop("learning_rate_scheduler", None)
momentum_scheduler_params = params.pop("momentum_scheduler", None)
if isinstance(cuda_device, list):
model_device = cuda_device[0]
else:
model_device = cuda_device
if model_device >= 0:
# Moving model to GPU here so that the optimizer state gets constructed on
# the right device.
model = model.cuda(model_device)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
if "moving_average" in params:
moving_average = MovingAverage.from_params(params.pop("moving_average"), parameters=parameters)
else:
moving_average = None
if lr_scheduler_params:
lr_scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
else:
lr_scheduler = None
if momentum_scheduler_params:
momentum_scheduler = MomentumScheduler.from_params(optimizer, momentum_scheduler_params)
else:
momentum_scheduler = None
if 'checkpointer' in params:
if 'keep_serialized_model_every_num_seconds' in params or \
'num_serialized_models_to_keep' in params:
raise ConfigurationError(
"Checkpointer may be initialized either from the 'checkpointer' key or from the "
"keys 'num_serialized_models_to_keep' and 'keep_serialized_model_every_num_seconds'"
" but the passed config uses both methods.")
checkpointer = Checkpointer.from_params(params.pop("checkpointer"))
else:
num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
keep_serialized_model_every_num_seconds = params.pop_int(
"keep_serialized_model_every_num_seconds", None)
checkpointer = Checkpointer(
serialization_dir=serialization_dir,
num_serialized_models_to_keep=num_serialized_models_to_keep,
keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds)
model_save_interval = params.pop_float("model_save_interval", None)
summary_interval = params.pop_int("summary_interval", 100)
histogram_interval = params.pop_int("histogram_interval", None)
should_log_parameter_statistics = params.pop_bool("should_log_parameter_statistics", True)
should_log_learning_rate = params.pop_bool("should_log_learning_rate", False)
log_batch_size_period = params.pop_int("log_batch_size_period", None)
params.assert_empty(cls.__name__)
return cls(model, optimizer, iterator,
train_data, validation_data,
patience=patience,
validation_metric=validation_metric,
validation_iterator=validation_iterator,
shuffle=shuffle,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=lr_scheduler,
momentum_scheduler=momentum_scheduler,
checkpointer=checkpointer,
model_save_interval=model_save_interval,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate,
log_batch_size_period=log_batch_size_period,
moving_average=moving_average)
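# Illustrative configuration stanza showing how this trainer could be selected in
# an AllenNLP experiment file via the "MyTrainer" registration above. The keys and
# values below are placeholders, not taken from a real experiment config:
#
# "trainer": {
#     "type": "MyTrainer",
#     "optimizer": {"type": "adam", "lr": 1e-3},
#     "validation_metric": "+accuracy",
#     "patience": 5,
#     "num_epochs": 20,
#     "cuda_device": 0
# }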
|
the-stack_106_28163 | import pendulum
from dagster.core.definitions.run_request import InstigatorType
from dagster.core.scheduler.instigation import InstigatorState, InstigatorStatus
from dagster.core.test_utils import create_test_daemon_workspace
from dagster.daemon import get_default_daemon_logger
from dagster.daemon.sensor import execute_sensor_iteration
from dagster.utils import Counter, traced_counter
from dagster_graphql.test.utils import (
execute_dagster_graphql,
infer_repository_selector,
infer_sensor_selector,
main_repo_location_name,
main_repo_name,
)
from .graphql_context_test_suite import (
ExecutingGraphQLContextTestMatrix,
NonLaunchableGraphQLContextTestMatrix,
)
GET_SENSORS_QUERY = """
query SensorsQuery($repositorySelector: RepositorySelector!) {
sensorsOrError(repositorySelector: $repositorySelector) {
__typename
... on PythonError {
message
stack
}
... on Sensors {
results {
name
targets {
pipelineName
solidSelection
mode
}
description
minIntervalSeconds
sensorState {
status
runs {
id
runId
}
runsCount
ticks {
id
status
timestamp
runIds
error {
message
stack
}
skipReason
}
}
}
}
}
}
"""
GET_SENSOR_QUERY = """
query SensorQuery($sensorSelector: SensorSelector!) {
sensorOrError(sensorSelector: $sensorSelector) {
__typename
... on PythonError {
message
stack
}
... on Sensor {
name
targets {
pipelineName
solidSelection
mode
}
minIntervalSeconds
nextTick {
timestamp
}
sensorState {
status
runs {
id
runId
}
runsCount
ticks {
id
status
timestamp
runIds
error {
message
stack
}
}
}
}
}
}
"""
GET_SENSOR_STATUS_QUERY = """
query SensorStateQuery($sensorSelector: SensorSelector!) {
sensorOrError(sensorSelector: $sensorSelector) {
__typename
... on Sensor {
sensorState {
id
status
}
}
}
}
"""
GET_SENSOR_TICK_RANGE_QUERY = """
query SensorQuery($sensorSelector: SensorSelector!, $dayRange: Int, $dayOffset: Int) {
sensorOrError(sensorSelector: $sensorSelector) {
__typename
... on PythonError {
message
stack
}
... on Sensor {
id
sensorState {
id
ticks(dayRange: $dayRange, dayOffset: $dayOffset) {
id
timestamp
}
}
}
}
}
"""
START_SENSORS_QUERY = """
mutation($sensorSelector: SensorSelector!) {
startSensor(sensorSelector: $sensorSelector) {
... on PythonError {
message
className
stack
}
... on Sensor {
id
jobOriginId
sensorState {
status
}
}
}
}
"""
STOP_SENSORS_QUERY = """
mutation($jobOriginId: String!) {
stopSensor(jobOriginId: $jobOriginId) {
... on PythonError {
message
className
stack
}
... on StopSensorMutationResult {
instigationState {
status
}
}
}
}
"""
REPOSITORY_SENSORS_QUERY = """
query RepositorySensorsQuery($repositorySelector: RepositorySelector!) {
repositoryOrError(repositorySelector: $repositorySelector) {
... on Repository {
id
sensors {
id
name
sensorState {
id
runs(limit: 1) {
id
runId
}
}
}
}
}
}
"""
class TestSensors(NonLaunchableGraphQLContextTestMatrix):
def test_get_sensors(self, graphql_context, snapshot):
selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
GET_SENSORS_QUERY,
variables={"repositorySelector": selector},
)
assert result.data
assert result.data["sensorsOrError"]
assert result.data["sensorsOrError"]["__typename"] == "Sensors"
results = result.data["sensorsOrError"]["results"]
snapshot.assert_match(results)
def test_get_sensor(self, graphql_context, snapshot):
sensor_selector = infer_sensor_selector(graphql_context, "always_no_config_sensor")
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["sensorOrError"]
assert result.data["sensorOrError"]["__typename"] == "Sensor"
sensor = result.data["sensorOrError"]
snapshot.assert_match(sensor)
class TestSensorMutations(ExecutingGraphQLContextTestMatrix):
def test_start_sensor(self, graphql_context):
sensor_selector = infer_sensor_selector(graphql_context, "always_no_config_sensor")
result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["startSensor"]["sensorState"]["status"] == InstigatorStatus.RUNNING.value
def test_stop_sensor(self, graphql_context):
sensor_selector = infer_sensor_selector(graphql_context, "always_no_config_sensor")
# start sensor
start_result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert (
start_result.data["startSensor"]["sensorState"]["status"]
== InstigatorStatus.RUNNING.value
)
job_origin_id = start_result.data["startSensor"]["jobOriginId"]
result = execute_dagster_graphql(
graphql_context,
STOP_SENSORS_QUERY,
variables={"jobOriginId": job_origin_id},
)
assert result.data
assert (
result.data["stopSensor"]["instigationState"]["status"]
== InstigatorStatus.STOPPED.value
)
def test_start_sensor_with_default_status(self, graphql_context):
sensor_selector = infer_sensor_selector(graphql_context, "running_in_code_sensor")
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_STATUS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data["sensorOrError"]["sensorState"]["status"] == "RUNNING"
sensor_origin_id = result.data["sensorOrError"]["sensorState"]["id"]
start_result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert (
"You have attempted to start sensor running_in_code_sensor, but it is already running"
in start_result.data["startSensor"]["message"]
)
stop_result = execute_dagster_graphql(
graphql_context,
STOP_SENSORS_QUERY,
variables={"jobOriginId": sensor_origin_id},
)
assert stop_result.data["stopSensor"]["instigationState"]["status"] == "STOPPED"
# Now can be restarted
start_result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert start_result.data["startSensor"]["sensorState"]["status"] == "RUNNING"
def test_sensor_next_ticks(graphql_context):
external_repository = graphql_context.get_repository_location(
main_repo_location_name()
).get_repository(main_repo_name())
sensor_name = "always_no_config_sensor"
external_sensor = external_repository.get_external_sensor(sensor_name)
sensor_selector = infer_sensor_selector(graphql_context, sensor_name)
result = execute_dagster_graphql(
graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
)
# test default sensor off
assert result.data
assert result.data["sensorOrError"]["__typename"] == "Sensor"
next_tick = result.data["sensorOrError"]["nextTick"]
assert not next_tick
# test default sensor with no tick
graphql_context.instance.add_instigator_state(
InstigatorState(
external_sensor.get_external_origin(), InstigatorType.SENSOR, InstigatorStatus.RUNNING
)
)
result = execute_dagster_graphql(
graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
)
assert result.data
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0
assert result.data["sensorOrError"]["__typename"] == "Sensor"
next_tick = result.data["sensorOrError"]["nextTick"]
assert not next_tick
# test default sensor with last tick
_create_tick(graphql_context)
result = execute_dagster_graphql(
graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
assert result.data
assert result.data["sensorOrError"]["__typename"] == "Sensor"
next_tick = result.data["sensorOrError"]["nextTick"]
assert next_tick
def _create_tick(graphql_context):
with create_test_daemon_workspace(
graphql_context.process_context.workspace_load_target
) as workspace:
list(
execute_sensor_iteration(
graphql_context.instance, get_default_daemon_logger("SensorDaemon"), workspace
)
)
def test_sensor_tick_range(graphql_context):
external_repository = graphql_context.get_repository_location(
main_repo_location_name()
).get_repository(main_repo_name())
sensor_name = "always_no_config_sensor"
external_sensor = external_repository.get_external_sensor(sensor_name)
sensor_selector = infer_sensor_selector(graphql_context, sensor_name)
# test with no job state
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": None, "dayOffset": None},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0
# turn the sensor on
graphql_context.instance.add_instigator_state(
InstigatorState(
external_sensor.get_external_origin(), InstigatorType.SENSOR, InstigatorStatus.RUNNING
)
)
now = pendulum.now("US/Central")
one = now.subtract(days=2).subtract(hours=1)
with pendulum.test(one):
_create_tick(graphql_context)
two = now.subtract(days=1).subtract(hours=1)
with pendulum.test(two):
_create_tick(graphql_context)
three = now.subtract(hours=1)
with pendulum.test(three):
_create_tick(graphql_context)
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": None, "dayOffset": None},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 3
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": 1, "dayOffset": None},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["timestamp"] == three.timestamp()
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": 1, "dayOffset": 1},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["timestamp"] == two.timestamp()
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={
"sensorSelector": sensor_selector,
"dayRange": 2,
"dayOffset": None,
},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 2
def test_repository_batching(graphql_context):
traced_counter.set(Counter())
selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
REPOSITORY_SENSORS_QUERY,
variables={"repositorySelector": selector},
)
assert result.data
assert "repositoryOrError" in result.data
assert "sensors" in result.data["repositoryOrError"]
counter = traced_counter.get()
counts = counter.counts()
assert counts
assert len(counts) == 2
# We should have a single batch call to fetch run records (to fetch sensor runs) and a single
# batch call to fetch instigator state, instead of separate calls for each sensor (~5 distinct
# sensors in the repo)
# 1) `get_run_records` is fetched to instantiate GrapheneRun
# 2) `all_instigator_state` is fetched to instantiate GrapheneSensor
assert counts.get("DagsterInstance.get_run_records") == 1
assert counts.get("DagsterInstance.all_instigator_state") == 1
|
the-stack_106_28165 | import scrapy
from datetime import datetime, timedelta
def clean_time(input):
time = input.lower().replace(',','').replace('alle ','').replace('il ',' ').strip()
to_english={}
to_english['gennaio']='january'
to_english['febbraio']='february'
to_english['marzo']='march'
to_english['aprile']='april'
to_english['maggio']='may'
to_english['giugno']='june'
to_english['luglio']='july'
to_english['agosto']='august'
to_english['settembre']='september'
to_english['ottobre']='october'
to_english['novembre']='november'
to_english['dicembre']='december'
to_english['oggi'] = datetime.today().strftime('%d %B %Y')
to_english['ieri'] = (datetime.today() - timedelta(days=1)).strftime('%d %B %Y')
for key in to_english:
if key in time:
time = time.replace(key, to_english[key])
break # only onee month per string!
d = datetime.strptime(time, '%d %B %Y %H:%M')
return d.strftime('%Y-%m-%d %H:%M')
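# Small illustrative check of clean_time on the Italian date strings it is meant
# to normalise. The sample strings are made up, and parsing with %B assumes an
# English (e.g. C) locale so the translated month names are recognised.
if __name__ == "__main__":
    # Expected output: "2020-01-12 14:30" and "2019-10-03 09:05"
    print(clean_time("12 Gennaio 2020, alle 14:30"))
    print(clean_time("il 3 Ottobre 2019 alle 09:05"))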
class ForumSpider(scrapy.Spider):
name = "forum"
def start_requests(self):
urls = [
'https://www.matrimonio.com/forum/recenti'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse_comments(self, parent_id, response):
i=0
for comment in response.xpath('//li[@class="pure-g discuss-post-comment"]'):
# take all the text, turn it to lower case and remove extra spaces
# ignore 'Visualizza messaggio citato'
text = [x.strip().lower() for x in comment.xpath('div//div[contains(@class,"discuss-post-comment-content") and contains(@class,"com-discuss")]/*//text()').getall()
if 'Visualizza messaggio citato' not in x]
# remove empty strings
text = list(filter(None, text))
# join different paragraphs of same comment
text = ' '.join(text)
title = 'comment' # comments don't have a title; labelling them 'comment' helps distinguish them from the main thread post
id = comment.xpath('div//div[@class="discussion-message-globe"]/div[@class="discuss-post-comment-header"]/@id').get(default='NA').lower().strip()
author = comment.xpath('div/a/@data-id-user').get(default='NA').lower().strip()
time = clean_time(comment.xpath('div//time/text()').get(default='NA'))
yield{
'title':title,
'text':text,
'id':id,
'author':author,
'time': time,
'parent_id': parent_id
}
def parse_topic(self, response):
title = response.xpath('//meta[@property="og:title"]/@content').get(default='NA').strip().lower()
text = response.xpath('//div[contains(@class,"com-post-content")]/text()').get(default='NA').strip().lower()
id = response.xpath('//div[@id="relatedDressesBox"]/@data-idtema').get(default='NA').lower().strip()
author = response.xpath('//div[@class="wrapper main"]//div[contains(@class,"com-post-header-author")]/a/@data-id-user').get(default='NA').lower().strip()
time = clean_time(response.xpath('//span[@class="com-post-header-meta-date"]/text()').getall()[1])
yield{
'title':title,
'text':text,
'id':id,
'parent_id':id, #for the initial post I set the parent id as the it of the post itself
'author':author,
'time':time
}
# store the comments on the first page of the thread
for comment in self.parse_comments( parent_id=id, response=response):
yield comment
# now I follow the other pages on the same thread
for link in response.xpath('/html/head/link[@rel="next"]/@href').getall():
yield response.follow(link, callback=lambda y: self.parse_comments(parent_id=id, response=y))
def parse(self, response):
# from the main page, follow each comment to its own page
for topic_ref in response.xpath('//div[@class="discussion-post-item "]//a[@class="discussion-post-item-title"]'):
yield response.follow(topic_ref, callback=self.parse_topic)
# look at pages after the first one
for a in response.xpath('//a[@class="next"]'):
yield response.follow(a, callback=self.parse)
|
the-stack_106_28166 | EMPTY_STRING = '<DYNAMODB_EMPTY_STRING>'
def SimpleToField(simple):
if isinstance(simple, list):
return {'L': [SimpleToField(nested_simple) for nested_simple in simple]}
elif isinstance(simple, dict):
return {'M': {nested_key: SimpleToField(nested_simple) for nested_key, nested_simple in simple.items()}}
elif isinstance(simple, str):
if simple == '':
return {'S': EMPTY_STRING}
else:
return {'S': simple}
elif isinstance(simple, bool):
return {'BOOL': simple}
elif isinstance(simple, (float, int)):
return {'N': str(simple)}
elif simple is None:
return {'NULL': True}
else:
raise TypeError('Unsupported type: %s' % type(simple))
def SimpleToItem(simple):
return {nested_key: SimpleToField(nested_simple) for nested_key, nested_simple in simple.items()}
def FieldToSimple(type_item):
assert isinstance(type_item, dict)
assert len(type_item) == 1, 'Length must be 1, but %d' % len(type_item)
type_, item = list(type_item.items())[0]
if type_ == 'L':
return [FieldToSimple(nested_type_item) for nested_type_item in item]
elif type_ == 'M':
return {nested_key: FieldToSimple(nested_type_item)
for nested_key, nested_type_item in item.items()}
elif type_ == 'S':
simple = str(item)
if simple == EMPTY_STRING:
return ''
else:
return simple
elif type_ == 'BOOL':
return item
elif type_ == 'N':
return float(item)
elif type_ == 'NULL':
assert item is True
return None
else:
raise TypeError('Unsupported type: %s' % type_)
def ItemToSimple(item):
return {nested_key: FieldToSimple(nested_type_item) for nested_key, nested_type_item in item.items()}
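# Illustrative round trip through the converters above. The item is made up and
# only exercises the attribute types handled here (S, N, BOOL, NULL, L, M),
# including the empty-string sentinel.
if __name__ == '__main__':
    simple = {
        'name': '',                 # serialised as {'S': EMPTY_STRING}
        'price': 9.99,              # serialised as {'N': '9.99'}, read back as float
        'in_stock': True,
        'tags': ['new', 'sale'],
        'meta': {'note': None},
    }
    item = SimpleToItem(simple)
    restored = ItemToSimple(item)
    assert restored['name'] == '' and restored['meta']['note'] is None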
|
the-stack_106_28168 | # System libs
import os
import argparse
from distutils.version import LooseVersion
import json
# Numerical libs
import numpy as np
import torch
import torch.nn as nn
import csv
import logging
# Our libs
from dataset import TestDataset
from models import ModelBuilder, SegmentationModule
from utils import colorEncode, find_recursive, setup_logger
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy
from PIL import Image
from tqdm import tqdm
from config import cfg
from models.modelsummary import get_model_summary
def visualize_result(data, pred, cfg):
colors = []
names = {}
with open(cfg.DATASET.classInfo) as f:
clsInfo = json.load(f)
for c in clsInfo:
names[c] = clsInfo[c]['name']
colors.append(clsInfo[c]['color'])
colors = np.array(colors, dtype='uint8')
(img, info) = data
show_result = False
# print predictions in descending order
pred = np.int32(pred)
pixs = pred.size
uniques, counts = np.unique(pred, return_counts=True)
print("Predictions in [{}]:".format(info))
for idx in np.argsort(counts)[::-1]:
name = names[str(uniques[idx] + 1)]
ratio = counts[idx] / pixs * 100
if ratio > 0.1:
print(" {}: {:.2f}%".format(name, ratio))
# colorize prediction
pred_color = colorEncode(pred, colors).astype(np.uint8)
# aggregate images and save
im_vis = np.concatenate((img, pred_color), axis=1)
img_name = info.split('/')[-1]
Image.fromarray(im_vis).save(
os.path.join(cfg.TEST.result, '{}_{}{}.png'.format(img_name[:-4], cfg.MODEL.arch_encoder, cfg.MODEL.arch_decoder)))
def test(segmentation_module, loader, gpu):
segmentation_module.eval()
pbar = tqdm(total=len(loader))
for batch_data in loader:
# process data
batch_data = batch_data[0]
segSize = (batch_data['img_ori'].shape[0],
batch_data['img_ori'].shape[1])
img_resized_list = batch_data['img_data']
with torch.no_grad():
scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
scores = async_copy_to(scores, gpu)
for img in img_resized_list:
feed_dict = batch_data.copy()
feed_dict['img_data'] = img
del feed_dict['img_ori']
del feed_dict['info']
feed_dict = async_copy_to(feed_dict, gpu)
# forward pass
pred_tmp = segmentation_module(feed_dict, segSize=segSize)
scores += pred_tmp / len(cfg.DATASET.imgSizes)
_, pred = torch.max(scores, dim=1)
pred = as_numpy(pred.squeeze(0).cpu())
#print(pred)
# visualization
visualize_result((batch_data['img_ori'], batch_data['info']),pred,cfg)
pbar.update(1)
def main(cfg, gpu):
torch.cuda.set_device(gpu)
# Network Builders
net_encoder = ModelBuilder.build_encoder(
arch=cfg.MODEL.arch_encoder,
fc_dim=cfg.MODEL.fc_dim,
weights=cfg.MODEL.weights_encoder)
net_decoder = ModelBuilder.build_decoder(
arch=cfg.MODEL.arch_decoder,
fc_dim=cfg.MODEL.fc_dim,
num_class=cfg.DATASET.num_class,
weights=cfg.MODEL.weights_decoder,
use_softmax=True)
crit = nn.NLLLoss(ignore_index=-1)
#crit = nn.CrossEntropyLoss(ignore_index=-1)
segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
# Dataset and Loader
dataset_test = TestDataset(
cfg.list_test,
cfg.DATASET)
loader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=cfg.TEST.batch_size,
shuffle=False,
collate_fn=user_scattered_collate,
num_workers=5,
drop_last=True)
segmentation_module.cuda()
# dump_input = torch.rand((1, 3, 1920, 1080))
# with open ('dump_model_2.txt', 'w') as file:
# file.write(get_model_summary(segmentation_module.cuda(), dump_input.cuda(), verbose=True))
# Main loop
test(segmentation_module, loader_test, gpu)
print('Inference done!')
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
'PyTorch>=0.4.0 is required'
parser = argparse.ArgumentParser(
description="PyTorch Semantic Segmentation Testing"
)
parser.add_argument(
"--imgs",
required=True,
type=str,
help="an image paths, or a directory name"
)
parser.add_argument(
"--cfg",
default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"--gpu",
default=0,
type=int,
help="gpu id for evaluation"
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
# cfg.freeze()
logger = setup_logger(distributed_rank=0) # TODO
logger.info("Loaded configuration file {}".format(args.cfg))
logger.info("Running with config:\n{}".format(cfg))
cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()
# absolute paths of model weights
cfg.MODEL.weights_encoder = os.path.join(
cfg.DIR, 'encoder_' + cfg.TEST.checkpoint)
cfg.MODEL.weights_decoder = os.path.join(
cfg.DIR, 'decoder_' + cfg.TEST.checkpoint)
assert os.path.exists(cfg.MODEL.weights_encoder) and \
os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exitst!"
# generate testing image list
if os.path.isdir(args.imgs):
print(args.imgs)
imgs = find_recursive(args.imgs)
else:
imgs = [args.imgs]
assert len(imgs), "imgs should be a path to image (.jpg) or directory."
cfg.list_test = [{'fpath_img': x} for x in imgs]
if not os.path.isdir(cfg.TEST.result):
os.makedirs(cfg.TEST.result)
main(cfg, args.gpu)
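# Hypothetical invocation of this script (comments only); the script name, image
# directory, checkpoint name and result directory are placeholders:
#
#   python3 test_segmentation.py \
#       --imgs ./demo_images \
#       --cfg config/ade20k-resnet50dilated-ppm_deepsup.yaml \
#       --gpu 0 \
#       TEST.result ./results TEST.checkpoint epoch_20.pth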
|
the-stack_106_28170 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions provided for renderer."""
import numpy as np
import tensorflow as tf
def create_vertices_intrinsics(disparity, intrinsics):
"""3D mesh vertices from a given disparity and intrinsics.
Args:
disparity: [B, H, W] inverse depth
intrinsics: [B, 4] reference intrinsics
Returns:
[B, H*W, 3] vertex coordinates.
"""
# Focal lengths
fx = intrinsics[:, 0]
fy = intrinsics[:, 1]
fx = fx[Ellipsis, tf.newaxis, tf.newaxis]
fy = fy[Ellipsis, tf.newaxis, tf.newaxis]
# Centers
cx = intrinsics[:, 2]
cy = intrinsics[:, 3]
cx = cx[Ellipsis, tf.newaxis]
cy = cy[Ellipsis, tf.newaxis]
batch_size, height, width = disparity.shape.as_list()
vertex_count = height * width
i, j = tf.meshgrid(tf.range(width), tf.range(height))
i = tf.cast(i, tf.float32)
j = tf.cast(j, tf.float32)
width = tf.cast(width, tf.float32)
height = tf.cast(height, tf.float32)
# 0.5 is added to get the position of the pixel centers.
i = (i + 0.5) / width
j = (j + 0.5) / height
i = i[tf.newaxis]
j = j[tf.newaxis]
depths = 1.0 / tf.clip_by_value(disparity, 0.01, 1.0)
mx = depths / fx
my = depths / fy
px = (i-cx) * mx
py = (j-cy) * my
vertices = tf.stack([px, py, depths], axis=-1)
vertices = tf.reshape(vertices, (batch_size, vertex_count, 3))
return vertices
def create_triangles(h, w):
"""Creates mesh triangle indices from a given pixel grid size.
This function is not and need not be differentiable as triangle indices are
fixed.
Args:
h: (int) denoting the height of the image.
w: (int) denoting the width of the image.
Returns:
triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)
"""
x, y = np.meshgrid(range(w - 1), range(h - 1))
tl = y * w + x
tr = y * w + x + 1
bl = (y + 1) * w + x
br = (y + 1) * w + x + 1
triangles = np.array([tl, bl, tr, br, tr, bl])
triangles = np.transpose(triangles, (1, 2, 0)).reshape(
((w - 1) * (h - 1) * 2, 3))
return triangles
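# Minimal sketch tying the two helpers above together: vertices from a disparity
# map plus a fixed triangulation give a per-batch mesh. Shapes and values are
# illustrative only.
def _example_build_mesh():
    b, h, w = 1, 4, 6
    disparity = tf.fill([b, h, w], 0.5)                           # inverse depth in (0, 1]
    intrinsics = tf.constant([[1.0, 1.0, 0.5, 0.5]])              # (f_x, f_y, c_x, c_y), normalised
    vertices = create_vertices_intrinsics(disparity, intrinsics)  # [B, H*W, 3]
    triangles = create_triangles(h, w)                            # [2*(H-1)*(W-1), 3] indices
    return vertices, triangles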
def perspective_from_intrinsics(intrinsics):
"""Computes a perspective matrix from camera intrinsics.
The matrix maps camera-space to clip-space (x, y, z, w) where (x/w, y/w, z/w)
ranges from -1 to 1 in each axis. It's a standard OpenGL-stye perspective
matrix, except that we use positive Z for the viewing direction (instead of
negative) so there are sign differences.
Args:
intrinsics: [B, 4] Source camera intrinsics tensor (f_x, f_y, c_x, c_y)
Returns:
A [B, 4, 4] float32 Tensor that maps from right-handed camera space
to left-handed clip space.
"""
intrinsics = tf.convert_to_tensor(intrinsics)
focal_x = intrinsics[:, 0]
focal_y = intrinsics[:, 1]
principal_x = intrinsics[:, 2]
principal_y = intrinsics[:, 3]
zero = tf.zeros_like(focal_x)
one = tf.ones_like(focal_x)
near_z = 0.00001 * one
far_z = 10000.0 * one
a = (near_z + far_z) / (far_z - near_z)
b = -2.0 * near_z * far_z / (far_z - near_z)
matrix = [
[2.0 * focal_x, zero, 2.0 * principal_x - 1.0, zero],
[zero, 2.0 * focal_y, 2.0 * principal_y - 1.0, zero],
[zero, zero, a, b],
[zero, zero, one, zero]]
return tf.stack([tf.stack(row, axis=-1) for row in matrix], axis=-2)
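# Minimal sketch of using the perspective matrix: a camera-space point (x, y, z, 1)
# is mapped to clip space and then to normalised device coordinates by dividing by
# w. The intrinsics values are illustrative only.
def _example_project_point():
    intrinsics = tf.constant([[1.0, 1.0, 0.5, 0.5]])          # (f_x, f_y, c_x, c_y)
    perspective = perspective_from_intrinsics(intrinsics)     # [1, 4, 4]
    point = tf.constant([[0.1, -0.2, 2.0, 1.0]])              # camera-space, +z forward
    clip = tf.linalg.matvec(perspective, point)               # [1, 4]
    ndc = clip[:, :3] / clip[:, 3:4]                          # in [-1, 1] for visible points
    return ndc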
|
the-stack_106_28172 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This test verifies that the Spack directory layout works properly.
"""
import os
import os.path
import pytest
import spack.paths
import spack.repo
from spack.directory_layout import YamlDirectoryLayout
from spack.directory_layout import InvalidDirectoryLayoutParametersError
from spack.spec import Spec
# number of packages to test (to reduce test time)
max_packages = 10
def test_yaml_directory_layout_parameters(tmpdir, config):
"""This tests the various parameters that can be used to configure
the install location """
spec = Spec('python')
spec.concretize()
# Ensure default layout matches expected spec format
layout_default = YamlDirectoryLayout(str(tmpdir))
path_default = layout_default.relative_path_for_spec(spec)
assert(path_default == spec.format(
"{architecture}/"
"{compiler.name}-{compiler.version}/"
"{name}-{version}-{hash}"))
# Test hash_length parameter works correctly
layout_10 = YamlDirectoryLayout(str(tmpdir), hash_length=10)
path_10 = layout_10.relative_path_for_spec(spec)
layout_7 = YamlDirectoryLayout(str(tmpdir), hash_length=7)
path_7 = layout_7.relative_path_for_spec(spec)
assert(len(path_default) - len(path_10) == 22)
assert(len(path_default) - len(path_7) == 25)
# Test path_scheme
arch, compiler, package7 = path_7.split('/')
projections_package7 = {'all': "{name}-{version}-{hash:7}"}
layout_package7 = YamlDirectoryLayout(str(tmpdir),
projections=projections_package7)
path_package7 = layout_package7.relative_path_for_spec(spec)
assert(package7 == path_package7)
# Test separation of architecture or namespace
spec2 = Spec('libelf').concretized()
arch_scheme = "{architecture.platform}/{architecture.target}/{architecture.os}/{name}/{version}/{hash:7}" # NOQA: ignore=E501
ns_scheme = "${ARCHITECTURE}/${NAMESPACE}/${PACKAGE}-${VERSION}-${HASH:7}" # NOQA: ignore=E501
arch_ns_scheme_projections = {'all': arch_scheme,
'python': ns_scheme}
layout_arch_ns = YamlDirectoryLayout(
str(tmpdir), projections=arch_ns_scheme_projections)
arch_path_spec2 = layout_arch_ns.relative_path_for_spec(spec2)
assert(arch_path_spec2 == spec2.format(arch_scheme))
ns_path_spec = layout_arch_ns.relative_path_for_spec(spec)
assert(ns_path_spec == spec.format(ns_scheme))
# Ensure conflicting parameters caught
with pytest.raises(InvalidDirectoryLayoutParametersError):
YamlDirectoryLayout(str(tmpdir),
hash_length=20,
projections=projections_package7)
def test_read_and_write_spec(temporary_store, config, mock_packages):
"""This goes through each package in spack and creates a directory for
it. It then ensures that the spec for the directory's
installed package can be read back in consistently, and
finally that the directory can be removed by the directory
layout.
"""
layout = temporary_store.layout
packages = list(spack.repo.path.all_packages())[:max_packages]
for pkg in packages:
if pkg.name.startswith('external'):
# External package tests cannot be installed
continue
spec = pkg.spec
# If a spec fails to concretize, just skip it. If it is a
# real error, it will be caught by concretization tests.
try:
spec.concretize()
except Exception:
continue
layout.create_install_directory(spec)
install_dir = layout.path_for_spec(spec)
spec_path = layout.spec_file_path(spec)
# Ensure directory has been created in right place.
assert os.path.isdir(install_dir)
assert install_dir.startswith(temporary_store.root)
# Ensure spec file exists when directory is created
assert os.path.isfile(spec_path)
assert spec_path.startswith(install_dir)
# Make sure spec file can be read back in to get the original spec
spec_from_file = layout.read_spec(spec_path)
# currently we don't store build dependency information when
# we write out specs to the filesystem.
# TODO: fix this when we can concretize more loosely based on
# TODO: what is installed. We currently omit these to
# TODO: increase reuse of build dependencies.
stored_deptypes = ('link', 'run')
expected = spec.copy(deps=stored_deptypes)
expected._mark_concrete()
assert expected.concrete
assert expected == spec_from_file
assert expected.eq_dag(spec_from_file)
assert spec_from_file.concrete
# Ensure that specs that come out "normal" are really normal.
with open(spec_path) as spec_file:
read_separately = Spec.from_yaml(spec_file.read())
# TODO: revise this when build deps are in dag_hash
norm = read_separately.copy(deps=stored_deptypes)
assert norm == spec_from_file
assert norm.eq_dag(spec_from_file)
# TODO: revise this when build deps are in dag_hash
conc = read_separately.concretized().copy(deps=stored_deptypes)
assert conc == spec_from_file
assert conc.eq_dag(spec_from_file)
assert expected.dag_hash() == spec_from_file.dag_hash()
# Ensure directories are properly removed
layout.remove_install_directory(spec)
assert not os.path.isdir(install_dir)
assert not os.path.exists(install_dir)
def test_handle_unknown_package(temporary_store, config, mock_packages):
"""This test ensures that spack can at least do *some*
operations with packages that are installed but that it
does not know about. This is actually not such an uncommon
scenario with spack; it can happen when you switch from a
git branch where you're working on a new package.
This test ensures that the directory layout stores enough
information about installed packages' specs to uninstall
or query them again if the package goes away.
"""
layout = temporary_store.layout
mock_db = spack.repo.RepoPath(spack.paths.mock_packages_path)
not_in_mock = set.difference(
set(spack.repo.all_package_names()),
set(mock_db.all_package_names()))
packages = list(not_in_mock)[:max_packages]
# Create all the packages that are not in mock.
installed_specs = {}
for pkg_name in packages:
spec = spack.repo.get(pkg_name).spec
# If a spec fails to concretize, just skip it. If it is a
# real error, it will be caught by concretization tests.
try:
spec.concretize()
except Exception:
continue
layout.create_install_directory(spec)
installed_specs[spec] = layout.path_for_spec(spec)
with spack.repo.use_repositories(mock_db):
# Now check that even without the package files, we know
# enough to read a spec from the spec file.
for spec, path in installed_specs.items():
spec_from_file = layout.read_spec(
os.path.join(path, '.spack', 'spec.yaml'))
# To satisfy these conditions, directory layouts need to
# read in concrete specs from their install dirs somehow.
assert path == layout.path_for_spec(spec_from_file)
assert spec == spec_from_file
assert spec.eq_dag(spec_from_file)
assert spec.dag_hash() == spec_from_file.dag_hash()
def test_find(temporary_store, config, mock_packages):
"""Test that finding specs within an install layout works."""
layout = temporary_store.layout
packages = list(spack.repo.path.all_packages())[:max_packages]
# Create install prefixes for all packages in the list
installed_specs = {}
for pkg in packages:
if pkg.name.startswith('external'):
# External package tests cannot be installed
continue
spec = pkg.spec.concretized()
installed_specs[spec.name] = spec
layout.create_install_directory(spec)
# Make sure all the installed specs appear in
# DirectoryLayout.all_specs()
found_specs = dict((s.name, s) for s in layout.all_specs())
for name, spec in found_specs.items():
assert name in found_specs
assert found_specs[name].eq_dag(spec)
def test_yaml_directory_layout_build_path(tmpdir, config):
"""This tests build path method."""
spec = Spec('python')
spec.concretize()
layout = YamlDirectoryLayout(str(tmpdir))
rel_path = os.path.join(layout.metadata_dir, layout.packages_dir)
assert layout.build_packages_path(spec) == os.path.join(spec.prefix,
rel_path)
|
the-stack_106_28173 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is used to convert DNA design files into other file formats.
An input caDNAno design file is conveted into a DnaStructure object containing information about the
design (e.g. lattice type, virtual helix definitions) and information derived from that design
(e.g. strands, domains). caDNAno design files may contain deleted/inserted bases. By default the
DnaStructure is not created with deleted/inserted bases. The DnaStructure is created with
deleted/inserted bases by specifying the --modify command-line argument.
"""
import os
import re
import sys
import json
import logging
from .cadnano.reader import CadnanoReader
from .cadnano.writer import CadnanoWriter
from .cadnano.convert_design import CadnanoConvertDesign
from .viewer.writer import ViewerWriter
from .cando.writer import CandoWriter
from .simdna.writer import SimDnaWriter
from .pdbcif.pdb_writer import PdbWriter
from .pdbcif.cif_writer import CifWriter
from ..data.dna_structure import DnaStructure
from ..data.parameters import DnaParameters
from ..utils.xform import Xform,HelixGroupXform,apply_helix_xforms,xform_from_connectors
from .dna_sequence_data import dna_sequence_data
# TODO (JMS, 10/26/16): revisit where the sequence data is kept?
class ConverterFileFormats(object):
""" File format names to convert to/from. """
UNKNOWN = "unknown"
CADNANO = "cadnano"
CANDO = "cando"
CIF = "cif"
PDB = "pdb"
SIMDNA = "simdna"
STRUCTURE = "structure"
TOPOLOGY = "topology"
VIEWER = "viewer"
names = [ CADNANO, CANDO, CIF, PDB, SIMDNA, STRUCTURE, TOPOLOGY, VIEWER ]
class Converter(object):
""" This class stores objects for various models created when reading from a file.
Attributes:
cadnano_design (CadnanoDesign): The object storing the caDNAno design information.
cadnano_convert_design (CadnanoConvertDesign): The object used to convert a caDNAno design into a DnaStructure.
dna_parameters (DnaParameters): The DNA physical parameters used to generate the geometry of a DNA structure
dna_structure (DnaStructure): The object storing connectivity and geometry of a DNA structure.
infile (String): The file name to convert.
informat (String): The format of the file to convert, taken from ConverterFileFormats.
modify (bool): If true then DnaStructure is created with deleted/inserted bases.
outfile (String): The name of the file for converter output.
"""
def __init__(self):
self.cadnano_design = None
self.dna_structure = None
self.cadnano_convert_design = None
self.infile = None
self.informat = None
self.outfile = None
self.modify = False
self.dna_parameters = DnaParameters()
self.logger = logging.getLogger(__name__)
def read_cadnano_file(self, file_name, seq_file_name, seq_name):
""" Read in a caDNAno file.
Arguments:
file_name (String): The name of the caDNAno file to convert.
seq_file_name (String): The name of the CSV file used to assign a DNA base sequence to the DNA structure.
seq_name (String): The name of a sequence used to assign a DNA base sequence to the DNA structure.
"""
cadnano_reader = CadnanoReader()
self.cadnano_design = cadnano_reader.read_json(file_name)
self.cadnano_convert_design = CadnanoConvertDesign(self.dna_parameters)
self.dna_structure = self.cadnano_convert_design.create_structure(self.cadnano_design, self.modify)
# Read in staple sequences from a CSV format file.
if (seq_file_name):
_, file_extension = os.path.splitext(seq_file_name)
if (file_extension == ".csv"):
modified_structure = False
sequence = cadnano_reader.read_csv(seq_file_name)
self.cadnano_convert_design.set_sequence(self.dna_structure, modified_structure, sequence)
# Assign a sequence using a name.
if (seq_name):
if (seq_name not in dna_sequence_data):
self.logger.error("The sequence name %s is not recognized.", seq_name)
modified_structure = False
self.cadnano_convert_design.set_sequence_from_name(self.dna_structure, modified_structure, seq_name)
def write_viewer_file(self, file_name):
""" Write a Nanodesign Viewer file.
Arguments:
file_name (String): The name of the Nanodesign Viewer file to write.
"""
viewer_writer = ViewerWriter(self.dna_structure, self.dna_parameters)
viewer_writer.write(file_name)
def write_pdb_file(self, file_name):
""" Write an RCSB PDB-format file.
Arguments:
file_name (String): The name of the PDB file to write.
"""
pdb_writer = PdbWriter(self.dna_structure)
pdb_writer.write(file_name)
def write_cif_file(self, file_name):
""" Write a RCSB CIF-format file.
Arguments:
file_name (String): The name of the CIF file to write.
"""
cif_writer = CifWriter(self.dna_structure)
cif_writer.write(file_name, self.infile, self.informat )
def write_simdna_file(self, file_name):
""" Write a SimDNA pairs file.
Arguments:
file_name (String): The name of the SimDNA pairs file to write.
"""
simdna_writer = SimDnaWriter(self.dna_structure)
simdna_writer.write(file_name)
def write_topology_file(self, file_name):
""" Write a DNA topology file.
Arguments:
file_name (String): The name of the topology file to write.
"""
self.dna_structure.write_topology(file_name, write_json_format=True)
def write_structure_file(self, file_name):
""" Write a DNA structure file.
Arguments:
file_name (String): The name of the structure file to write.
"""
self.dna_structure.write(file_name,write_json_format=True)
def write_cando_file(self, file_name):
""" Write a CanDo .cndo file.
Arguments:
file_name (String): The name of the CanDo file to write.
"""
cando_writer = CandoWriter(self.dna_structure)
cando_writer.write(file_name)
def write_cadnano_file(self, file_name):
""" Write a caDNAno JSON file.
Arguments:
file_name (String): The name of the caDNAno file to write.
"""
cadnano_writer = CadnanoWriter(self.dna_structure)
cadnano_writer.write(file_name)
def perform_staple_operations(self, staples_arg):
""" Perform operations on staples.
Arguments:
staples_arg (String): The argument to the staples command-line option.
"""
tokens = staples_arg.split(",", 1)
operation = tokens[0]
retain_staples = []
# Parse retained staples IDs.
if len(tokens) == 2:
            pattern = re.compile(r'\W')
retain_tokens = pattern.split(tokens[1])
if retain_tokens[0] == "retain":
retain_colors = [ int(color) for color in retain_tokens[1:] if color != '']
#__if retain_tokens[0] == "retain"
retain_staples = self.dna_structure.get_staples_by_color(retain_colors)
#__if len(tokens) == 2
# Remove all staple strands except those given in retain_staples[].
if operation == "delete":
self.dna_structure.remove_staples(retain_staples)
        # Generate the maximal staple strand set except those given in retain_staples[].
elif operation == "maximal_set":
self.dna_structure.generate_maximal_staple_set(retain_staples)
#__def perform_staple_operations
def transform_structure(self, transform):
""" Apply 3D geometric transformations to a selected set of helices.
The format of the transform commands is:
helices(0,1):rotate(90,0,0),translate(0,0,0);helices(2,3):rotate(0,90,0),translate(0,0,0)
"""
helices_map = self.dna_structure.structure_helices_map
self.logger.info("Transform %s" % transform)
helix_groups = transform.split(";")
self.logger.info("Number of helix groups %d" % len(helix_groups))
# Parse helix IDs.
helix_group_xforms = []
for helix_group in helix_groups:
tokens = helix_group.split(":")
pattern = re.compile(r"[,()]")
helix_tokens = pattern.split(tokens[0])
helix_ids = []
for s in helix_tokens:
if s == "helices":
continue
elif "-" in s:
rtoks = s.split("-")
start = int(rtoks[0])
end = int(rtoks[1])+1
                    for id in range(start, end):
helix_ids.append(id)
elif s:
helix_ids.append(int(s))
#__for s in helix_tokens
# Check helix IDs.
helices = []
for hid in helix_ids:
helix = helices_map.get(hid, None)
if not helix:
self.logger.error("Helix ID %d not found in dna structure." % hid)
self.logger.error("DNA Structure has helix IDs %s " % str(helices_map.keys()))
return
helices.append(helix)
#__if not helix:
#__for hid in helix_ids
self.logger.info("Helix group %s" % str(helix_ids))
# Parse transformations.
self.logger.info("Transformation \'%s\'" % tokens[1])
pattern = re.compile(r"[(),]")
xform_tokens = pattern.split(tokens[1])
n = 0
use_connectors = False
xform = Xform()
while (n != len(xform_tokens)):
s = xform_tokens[n]
if s == "rotate":
rotations = []
rotations.append(float(xform_tokens[n+1]))
rotations.append(float(xform_tokens[n+2]))
rotations.append(float(xform_tokens[n+3]))
n += 3
xform.add_rotation(rotations)
rotations = []
elif s == "translate":
translation = []
translation.append(float(xform_tokens[n+1]))
translation.append(float(xform_tokens[n+2]))
translation.append(float(xform_tokens[n+3]))
n += 3
xform.set_translation(translation)
elif s == "connectors":
use_connectors = True
strand_name = xform_tokens[n+1]
n += 1
#__if s == "rotate"
n += 1
#__while (n != len(xform_tokens))
            # Automatically generate the transformation that moves one group of helices to another
# using the connections of distance crossovers.
if use_connectors:
self.logger.info("Use connectors with strand \'%s\'" % strand_name)
connector_strands = []
for strand in self.dna_structure.strands:
if strand.is_scaffold:
connector_strands.append(strand)
helix_dist = self.dna_structure.dna_parameters.helix_distance
xform_from_connectors(connector_strands, helix_ids, helix_dist, xform)
#__if use_connectors
helix_group_xforms.append( HelixGroupXform(helices, xform) )
#__for helix_group in helix_groups
# Apply the transformation to the dna structure helices.
apply_helix_xforms(helix_group_xforms)
#__def transform_structure
def set_module_loggers(self, names):
module_names = names.split(",")
for module in module_names:
logger = logging.getLogger(module)
logger.setLevel(logging.DEBUG)
#__for module in modules
#__def set_debugging_loggers
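# A minimal usage sketch (illustrative only; the function name and file paths
# are placeholders, and the sequence arguments are deliberately left unset):
def _example_convert(cadnano_json_path, viewer_out_path):
    converter = Converter()
    converter.modify = False  # default behaviour: no deleted/inserted bases
    converter.read_cadnano_file(cadnano_json_path, seq_file_name=None, seq_name=None)
    converter.write_viewer_file(viewer_out_path)
    return converter.dna_structure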
|
the-stack_106_28175 | import torch
from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
@HEADS.register_module()
class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Simplest base roi head including one bbox head and one mask head."""
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
self.bbox_assigner = None
self.bbox_sampler = None
if self.train_cfg:
self.bbox_assigner = build_assigner(self.train_cfg.assigner)
self.bbox_sampler = build_sampler(
self.train_cfg.sampler, context=self)
def init_bbox_head(self, bbox_roi_extractor, bbox_head):
"""Initialize ``bbox_head``"""
self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
self.bbox_head = build_head(bbox_head)
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize ``mask_head``"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = build_head(mask_head)
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
"""Run forward function and calculate loss for box head in training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head in
training."""
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_results = self._mask_forward(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
self.train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
return mask_results
def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
"""Mask head forward function used in both training and testing."""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
assert bbox_feats is not None
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
return mask_results
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = await self.async_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = await self.async_test_mask(
x,
img_metas,
det_bboxes,
det_labels,
rescale=rescale,
mask_test_cfg=self.test_cfg.get('mask'))
return bbox_results, segm_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head.num_classes)
for i in range(len(det_bboxes))
]
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
proposal_list,
self.test_cfg)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
det_labels)
return [(bbox_results, segm_results)]
else:
return [bbox_results]
def onnx_export(self, x, proposals, img_metas, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.bbox_onnx_export(
x, img_metas, proposals, self.test_cfg, rescale=rescale)
if not self.with_mask:
return det_bboxes, det_labels
else:
segm_results = self.mask_onnx_export(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return det_bboxes, det_labels, segm_results
def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):
"""Export mask branch to onnx which supports batch inference.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
det_bboxes (Tensor): Bboxes and corresponding scores.
has shape [N, num_bboxes, 5].
det_labels (Tensor): class labels of
shape [N, num_bboxes].
Returns:
            Tensor: The segmentation results of shape [N, num_bboxes,
                image_height, image_width].
"""
# image shapes of images in the batch
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
raise RuntimeError('[ONNX Error] Can not record MaskHead '
'as it has not been executed this time')
batch_size = det_bboxes.size(0)
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
det_bboxes = det_bboxes[..., :4]
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view(
-1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
max_shape = img_metas[0]['img_shape_for_onnx']
num_det = det_bboxes.shape[1]
det_bboxes = det_bboxes.reshape(-1, 4)
det_labels = det_labels.reshape(-1)
segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,
det_labels, self.test_cfg,
max_shape)
segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],
max_shape[1])
return segm_results
def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,
**kwargs):
"""Export bbox branch to onnx which supports batch inference.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
proposals (Tensor): Region proposals with
batch dimension, has shape [N, num_bboxes, 5].
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
Returns:
tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]
and class labels of shape [N, num_bboxes].
"""
# get origin input shape to support onnx dynamic input shape
assert len(
img_metas
) == 1, 'Only support one input image while in exporting to ONNX'
img_shapes = img_metas[0]['img_shape_for_onnx']
rois = proposals
batch_index = torch.arange(
rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 5)
bbox_results = self._bbox_forward(x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,
bbox_pred.size(-1))
det_bboxes, det_labels = self.bbox_head.onnx_export(
rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)
return det_bboxes, det_labels
|
the-stack_106_28176 | #######################################################################
# Implementation of FM partition
# You need to implement initialize() and partition_one_pass()
# All codes should be inside FM_Partition class
# Name: Dennis Liu
# UT EID: dl34437
#######################################################################
from typing import List, Tuple
import numpy as np
import math as m
from .p1_partition_base import FM_Partition_Base
__all__ = ["FM_Partition"]
class FM_Partition(FM_Partition_Base):
def __init__(self) -> None:
super().__init__()
def update_gbuckets(self): # Calculates gains for all nodes - technically only neighbors are needed, but this is what I have for now
self.block0_g.clear()
self.block1_g.clear()
for n in range(len(self.partition[0])): # Computing gains for each node in block0 - moves each node in block0 to block1, calculates cut size diff, then moves them back
if self.partition[0][n] in self.locked_nodes: # If the current node we are looking at is in the locked nodes list, just ignore it
continue
else:
self.partition[1].append(self.partition[0][n])
self.partition[0].pop(n)
temp_cut_size = self.compute_cut_size(self.partition)
temp_cut_size = self.cut_size - temp_cut_size
self.partition[0].insert(n, self.partition[1][-1])
self.partition[1].pop()
if temp_cut_size in self.block0_g.keys():
self.block0_g.get(temp_cut_size).append(self.partition[0][n])
self.block0_g.get(temp_cut_size).sort()
else:
self.block0_g[temp_cut_size] = [self.partition[0][n]]
for n in range(len(self.partition[1])): # Computing gains for each node in block1
if self.partition[1][n] in self.locked_nodes:
continue
else:
self.partition[0].append(self.partition[1][n])
self.partition[1].pop(n)
temp_cut_size = self.compute_cut_size(self.partition)
temp_cut_size = self.cut_size - temp_cut_size
self.partition[1].insert(n, self.partition[0][-1])
self.partition[0].pop()
if temp_cut_size in self.block1_g.keys():
self.block1_g.get(temp_cut_size).append(self.partition[1][n])
self.block1_g.get(temp_cut_size).sort()
else:
self.block1_g[temp_cut_size] = [self.partition[1][n]]
''' DEAD FUNCTION
def update_gnodes(self):
listk = []
listk1 = []
for key, lists in self.block0_g.items():
for l_nodes in self.locked_nodes:
if l_nodes in lists:
lists.remove(l_nodes)
if not lists:
listk.append(key)
for key in listk:
self.block0_g.pop(key)
for key, lists in self.block1_g.items():
for l_nodes in self.locked_nodes:
if l_nodes in lists:
lists.remove(l_nodes)
if not lists:
listk1.append(key)
for key in listk1:
self.block1_g.pop(key)
'''
def move_rtl(self):
#x = sorted(self.block1_g.keys()) # Get all gains in block1 and sort
biggest_gains = self.block1_g.get(max(self.block1_g)) # Get the highest gain's list of nodes
biggest_gains.sort()
self.locked_nodes.append(biggest_gains[0])
self.partition[0].append(biggest_gains[0]) # Add in the first node to the "left" (block0)
self.partition[1].remove(biggest_gains[0]) # Remove the node from the "right" (block 1)
def move_ltr(self): # Same as move_rtl, but the blocks have been switched
#x = sorted(self.block0_g.keys())
biggest_gains = self.block0_g.get(max(self.block0_g))
biggest_gains.sort()
self.locked_nodes.append(biggest_gains[0])
self.partition[1].append(biggest_gains[0])
self.partition[0].remove(biggest_gains[0])
def move(self):
#x = sorted(self.block1_g.keys())
#y = self.block0_g.keys().sort()
if self.n_nodes - len(self.locked_nodes) > 1:
if max(self.block1_g) > max(self.block0_g): # If the largest gain on block1 is greater than the largest gain in block0, perform a right-to-left move
self.move_rtl()
elif max(self.block0_g) > max(self.block1_g): # Vice-versa
self.move_ltr()
else: # Both blocks have an equal maximum gain; find the first node via node number tie-breaker and then perform a rtl or ltr move
a = self.block0_g.get(max(self.block0_g))
b = self.block1_g.get(max(self.block1_g))
a.sort()
b.sort()
if a[0] < b[0]:
self.move_rtl()
else:
self.move_ltr()
else: # There is only one node left to move
if self.block0_g:
self.move_ltr()
else:
self.move_rtl()
def initialize(self):
"""Initialize necessary data structures before starting solving the problem
"""
self.cut_size = 0
self.cut_size_list = []
self.best_sol = ()
self.best_cut_size = 0
self.locked_nodes = []
self.block0 = []
self.block1 = []
self.r_ep = 0
self.block0_g = {}
self.block1_g = {}
# TODO initial solutions: block 0 and block 1
# To ensure a deterministic solution, use the following partition as the initial solution
# sort the node names in alphabetical order
# the first floor(N/2) nodes are in the first partition, The rest N-floor(N/2) nodes are in the second partition
# a_0, a_1, ..., a_N/2-1 | a_N/2, a_N/2+1, ..., a_N-1, if N even
# a_0, a_1, ..., a_(N-3)/2 | a_(N-1)/2, ..., a_N-1, if N odd
# ...
# Creates the initial partition
if self.n_nodes % 2 == 0:
for x in range(int(self.n_nodes / 2)):
self.block0.append(x)
for y in range(int(self.n_nodes / 2), self.n_nodes):
self.block1.append(y)
self.partition = (self.block0, self.block1)
else:
for x in range(m.floor(self.n_nodes / 2)):
self.block0.append(x)
for y in range(m.floor(self.n_nodes / 2), self.n_nodes):
self.block1.append(y)
self.partition = (self.block0, self.block1) # Partition is Tuple(List[int], List[int]) with int representation of nodes
# Sets initial values after the first partition
self.cut_size = self.compute_cut_size(self.partition)
self.cut_size_list.append(self.cut_size)
self.best_cut_size = self.cut_size
self.r_ep = self.min_cut_ratio - self.min_cut_ratio_epsilon
self.best_sol = ([self.node2node_name_map[n] for n in self.partition[0]], [self.node2node_name_map[n] for n in self.partition[1]])
# Initialize gain buckets
self.update_gbuckets()
#print(self.partition)
#print(self.block0_g)
#print(self.block1_g)
#print(self.cut_size)
# TODO initialize any auxiliary data structure you need
# e.g., node2net_map, cell gains, locked cells, etc.
def partition_one_pass(self) -> Tuple[List[int], Tuple[List[str], List[str]], int]:
"""FM graph partition algorithm for one pass
Return:
cut_size_list (List[int]): contains the initial cut size and the cut size after each move
best_sol (Tuple[List[str], List[str]]): The best partition solution is a tuple of two blocks.
Each block is a list of node names. (Please use the original node names from the benchmark file.
Hint: you might need to use node2node_name_map). If multiple solutions have the best cut size, return the first one.
best_cut_size (int): The cut size corresponding to the best partition solution
"""
# TODO implement your FM partition algorithm for one pass.
# To make this method clean, you can extract subroutines as methods of this class
# But do not override methods in the parent class
# Please strictly follow the return type requirement.
# (m.min(len(self.partition[0]), len(self.partition[1])) / self.n_nodes) < r_ep
while len(self.locked_nodes) < self.n_nodes: # As long as the locked-nodes list does not have all the nodes, we can iterate and swap
print(self.partition)
print(self.block0_g)
print(self.block1_g)
print(self.cut_size)
self.rtl_min = min((len(self.partition[1]) - 1), (len(self.partition[0]) + 1))
self.ltr_min = min((len(self.partition[0]) - 1), (len(self.partition[1]) + 1))
if (self.rtl_min / self.n_nodes) < self.r_ep: # If a right-to-left (block1 to block0) move would make the right partition too small, then execute a left-to-right move
self.move_ltr()
elif (self.ltr_min / self.n_nodes) < self.r_ep: # If a left-to-right move would make the left partition too small, then execute a right-to-left move
self.move_rtl()
else: # If both moves are acceptable, then find the best move out of both sides
self.move()
#self.update_gnodes()
self.update_gbuckets() # After a move, the gains are re-calculated
self.cut_size = self.compute_cut_size(self.partition) # Add the new cutsize to the history; check to see if we have a new best
self.cut_size_list.append(self.cut_size)
if self.cut_size < self.best_cut_size:
self.best_cut_size = self.cut_size
self.best_sol = ([self.node2node_name_map[n] for n in self.partition[0]], [self.node2node_name_map[n] for n in self.partition[1]])
#print(self.partition)
#print(self.block0_g)
#print(self.block1_g)
#print(self.cut_size)
else:
return self.cut_size_list, self.best_sol, self.best_cut_size # No moves available, first pass is over. Return gathered values.
|
the-stack_106_28177 | from sklearn import cluster
from distance import calc_ars, get_gmm_clusters, principal_angle_distance
import click
from plot_utils import get_nx_graph
from utils import get_as_numpy_array, map_embeddings_to_consecutive
@click.command()
@click.option("-e1", "--embeddings_1", type=str, required=True)
@click.option("-e2", "--embeddings_2", type=str, required=True)
@click.option("-g", "--graph_path", type=str)
@click.option("-c", "--clusters", type=int, required=True)
@click.option("-m", "--method", type=click.Choice(['kmeans', 'gmm']))
def ars(embeddings_1, embeddings_2, graph_path, clusters, method):
emb1, emb2 = map_embeddings_to_consecutive([embeddings_1, embeddings_2])
if method == 'kmeans':
prediction1 = cluster.KMeans(n_clusters=clusters, random_state=0).fit(emb1).labels_
prediction2 = cluster.KMeans(n_clusters=clusters, random_state=0).fit(emb2).labels_
else:
prediction1 = get_gmm_clusters(emb1, clusters)
prediction2 = get_gmm_clusters(emb2, clusters)
click.echo(f"Adjusted Rand Score: {calc_ars(prediction1, prediction2)}")
if __name__ == '__main__':
ars()
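# Example invocation (illustrative only; the script name, embedding files and
# cluster count below are placeholders):
#   python ars.py -e1 embeddings_a.emb -e2 embeddings_b.emb -c 10 -m kmeans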
|
the-stack_106_28179 | from .authenticator import Authenticator
from .websocket_client import WebSocketClient
from .constants import (
API_URL,
WEB_BASE_URL,
WS_BASE_URL,
START_SPLASH_TEXT,
END_SPLASH_TEXT,
codes,
)
from .tester import Tester
from .web_server import WebServer
from .logger import logger
class LocalServer:
"""
Main class for the local server
"""
def __init__(self):
self.status_code = codes.INTERNAL_SETUP
if "_is_from_test" not in self.__dict__:
self._is_from_test = False
# WebSocket client
self.ws_client = WebSocketClient(self, WS_BASE_URL)
# WebServer
self.web_server = WebServer(self)
# User authentication
self.authenticator = Authenticator(self)
# Runs tests for the system
self.tester = Tester(self)
# for testing, do not touch
@property
def status_code_name(self):
return codes.to_name(self.status_code)
def run(self):
"""
Method for starting the system
Runs all the tests
Authenticates with a user
Establishes Websocket connection
:return: Nothing
"""
print(START_SPLASH_TEXT)
logger.info("Starting Local Server")
# Start the Web Server
try:
self.set_status(codes.WEBSERVER_SETUP)
self.web_server.start()
except:
logger.error("COULD NOT START THE WEBSERVER")
self.set_status(codes.WEBSERVER_SETUP_ERROR)
# tests system
self.tester.establish_successful_tests()
# work on the authenticator
if not self.authenticator.is_authenticated:
self.authenticator.authenticate()
self.ws_client.establish_connection()
def stop(self, code=None):
"""
Stops the system
:param code: Code or reason for the shut down
:return: None
"""
logger.info("SHUTTING DOWN...")
self.web_server.stop()
logger.debug("good bye <3")
print(END_SPLASH_TEXT)
# exit(code)
def set_status(self, code):
"""
Sets the internal status code
:param code: int of the status code
:return: None
"""
self.status_code = code
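# A minimal usage sketch (illustrative only; the wrapper function name is not
# part of the original module):
def _example_run_local_server():
    server = LocalServer()
    try:
        server.run()
    except KeyboardInterrupt:
        server.stop()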
|
the-stack_106_28181 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import shlex
import stat
import time
import unittest
from typing import IO, Optional, Union
import common_tests
import hierarchy_tests
from hh_paths import hh_client
from saved_state_test_driver import (
SavedStateClassicTestDriver,
SavedStateTestDriver,
SaveStateResult,
)
def write_echo_json(f: IO, obj: object) -> None:
f.write("echo %s\n" % shlex.quote(json.dumps(obj)))
class LazyInitTestDriver(SavedStateTestDriver):
def write_local_conf(self) -> None:
with open(os.path.join(self.repo_dir, "hh.conf"), "w") as f:
f.write(
r"""
# some comment
use_mini_state = true
use_watchman = true
watchman_subscribe_v2 = true
lazy_decl = true
lazy_parse = true
lazy_init2 = true
incremental_init = true
enable_fuzzy_search = false
"""
)
class LazyInitCommonTests(
common_tests.CommonTests, LazyInitTestDriver, unittest.TestCase
):
pass
class LazyInitHeirarchyTests(
hierarchy_tests.HierarchyTests, LazyInitTestDriver, unittest.TestCase
):
pass
class SavedStateCommonTests(
common_tests.CommonTests, SavedStateTestDriver, unittest.TestCase
):
pass
class SavedStateBarebonesTestsClassic(
common_tests.BarebonesTests, SavedStateClassicTestDriver, unittest.TestCase
):
pass
class SavedStateHierarchyTests(
hierarchy_tests.HierarchyTests, SavedStateTestDriver, unittest.TestCase
):
pass
class SavedStateTests(SavedStateTestDriver, unittest.TestCase):
template_repo: Optional[str] = "hphp/hack/test/integration/data/simple_repo"
bin_dir: Optional[str]
def test_hhconfig_change(self) -> None:
"""
Start hh_server, then change .hhconfig and check that the server
restarts itself
"""
self.start_hh_server()
self.check_cmd(["No errors!"])
with open(os.path.join(self.repo_dir, ".hhconfig"), "w") as f:
f.write(
r"""
# some comment
assume_php = true
"""
)
# Server may take some time to kill itself.
time.sleep(2)
# The sleep(2) above also almost-always ensures another race condition
# goes the way we want: The informant-directed restart doesn't happen
# *during* processing of a new client connection. The ambiguity of that
# situation (whether or not the newly-connected client did read the
# new hhconfig file contents or not) means that the Monitor can't safely
# start a new server instance until the *next* client connects. Just in
# case the race doesn't go the way we want, add another "check_cmd"
# call here to force the Monitor into the state we want.
self.check_cmd(None, assert_loaded_saved_state=False)
# this should start a new server
self.check_cmd(["No errors!"])
# check how the old one exited
log_file = (
self.proc_call([hh_client, "--logname", self.repo_dir])[0].strip() + ".old"
)
with open(log_file) as f:
logs = f.read()
self.assertIn(".hhconfig changed in an incompatible way", logs)
def test_watchman_timeout(self) -> None:
with open(os.path.join(self.repo_dir, "hh.conf"), "a") as f:
f.write(
r"""
watchman_init_timeout = 1
"""
)
assert self.bin_dir is not None
with open(os.path.join(self.bin_dir, "watchman"), "w") as f:
f.write(r"""sleep 2""")
os.fchmod(f.fileno(), stat.S_IRWXU)
self.run_check()
# Stop the server, ensuring that its logs get flushed
self.proc_call([hh_client, "stop", self.repo_dir])
self.assertIn("Watchman_sig.Types.Timeout", self.get_server_logs())
def test_incrementally_generated_saved_state(self) -> None:
old_saved_state: SaveStateResult = self.dump_saved_state()
new_file = os.path.join(self.repo_dir, "class_3b.php")
self.add_file_that_depends_on_class_a(new_file)
self.check_cmd(["No errors!"], assert_loaded_saved_state=False)
new_saved_state: SaveStateResult = (
self.dump_saved_state(assert_edges_added=True)
)
assert (
new_saved_state.returned_values.edges_added is not None
and new_saved_state.returned_values.edges_added > 0
)
self.change_return_type_on_base_class(
os.path.join(self.repo_dir, "class_1.php")
)
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
"{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
" {root}class_3b.php:4:26,28: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
],
assert_loaded_saved_state=False,
)
self.proc_call([hh_client, "stop", self.repo_dir])
# Start server with the original saved state. Will be missing the
# second error because of the missing edge.
self.start_hh_server(
changed_files=["class_1.php"], saved_state_path=old_saved_state.path
)
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
]
)
self.proc_call([hh_client, "stop", self.repo_dir])
# Start another server with the new saved state. Will have both errors.
self.start_hh_server(
changed_files=["class_1.php"], saved_state_path=new_saved_state.path
)
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
"{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
" {root}class_3b.php:4:26,28: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
]
)
def test_incrementally_generated_saved_state_after_loaded_saved_state(self) -> None:
# Same as the above test, except we begin the test by starting up
# a Hack Server that loads a saved state.
self.start_hh_server()
# Hack server is now started with a saved state
self.check_cmd(["No errors!"], assert_loaded_saved_state=True)
old_saved_state = self.dump_saved_state()
new_file = os.path.join(self.repo_dir, "class_3b.php")
self.add_file_that_depends_on_class_a(new_file)
self.check_cmd(["No errors!"], assert_loaded_saved_state=True)
new_saved_state = self.dump_saved_state(assert_edges_added=True)
assert (
new_saved_state.returned_values.edges_added is not None
and new_saved_state.returned_values.edges_added > 0
)
self.change_return_type_on_base_class(
os.path.join(self.repo_dir, "class_1.php")
)
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
"{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
" {root}class_3b.php:4:26,28: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
],
assert_loaded_saved_state=True,
)
self.proc_call([hh_client, "stop", self.repo_dir])
# Start server with the original saved state. Will be missing the
# second error because of the missing edge.
self.start_hh_server(
changed_files=["class_1.php"], saved_state_path=old_saved_state.path
)
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
]
)
self.proc_call([hh_client, "stop", self.repo_dir])
# Start another server with the new saved state. Will have both errors.
self.start_hh_server(
changed_files=["class_1.php"], saved_state_path=new_saved_state.path
)
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
"{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
" {root}class_3b.php:4:26,28: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
]
)
def test_incrementally_generated_saved_state_with_errors(self) -> None:
# Introduce an error in "master"
self.change_return_type_on_base_class(
os.path.join(self.repo_dir, "class_1.php")
)
saved_state_with_1_error: SaveStateResult = self.dump_saved_state(
ignore_errors=True
)
self.proc_call([hh_client, "stop", self.repo_dir])
# Start server with the saved state, assume there are no local changes.
self.start_hh_server(
changed_files=None, saved_state_path=saved_state_with_1_error.path
)
# We still expect that the error from the saved state shows up.
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
]
)
self.proc_call([hh_client, "stop", self.repo_dir])
new_file = os.path.join(self.repo_dir, "class_3b.php")
self.add_file_that_depends_on_class_a(new_file)
# Start server with the saved state, the only change is in the new file.
self.start_hh_server(
changed_files=["class_3b.php"],
saved_state_path=saved_state_with_1_error.path,
)
# Now we expect 2 errors - one from the saved state and one
# from the change.
self.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
"{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
" {root}class_3b.php:4:26,28: This is an int",
" {root}class_1.php:5:33,38: It is incompatible with a string",
],
assert_loaded_saved_state=False,
)
saved_state_with_2_errors = self.dump_saved_state(ignore_errors=True)
self.proc_call([hh_client, "stop", self.repo_dir])
# Let's fix the error
self.change_return_type_on_base_class(
filename=os.path.join(self.repo_dir, "class_1.php"), type="int", value="11"
)
# Start another server with the new saved state. Will have both errors.
self.start_hh_server(
changed_files=["class_1.php"],
saved_state_path=saved_state_with_2_errors.path,
)
self.check_cmd(["No errors!"], assert_loaded_saved_state=True)
def test_replace_state_after_saving(self) -> None:
# Save state
result = self.dump_saved_state(assert_edges_added=True)
assert (
result.returned_values.edges_added is not None
and result.returned_values.edges_added > 0
)
# Save state again - confirm the same number of edges is dumped
result2 = self.dump_saved_state(assert_edges_added=True)
self.assertEqual(
result.returned_values.edges_added, result2.returned_values.edges_added
)
# Save state with the 'replace' arg
replace_result1 = self.dump_saved_state(
assert_edges_added=True, replace_state_after_saving=True
)
self.assertEqual(
result.returned_values.edges_added,
replace_result1.returned_values.edges_added,
)
# Save state with the new arg - confirm there are 0 new edges
replace_result2 = self.dump_saved_state(
assert_edges_added=True, replace_state_after_saving=True
)
self.assertEqual(replace_result2.returned_values.edges_added, 0)
# Make a change
# Save state - confirm there are only the # of new edges
# corresponding to the one change
new_file = os.path.join(self.repo_dir, "class_3b.php")
self.add_file_that_depends_on_class_a(new_file)
self.check_cmd(["No errors!"], assert_loaded_saved_state=False)
replace_incremental = self.dump_saved_state(
assert_edges_added=True, replace_state_after_saving=True
)
assert (
replace_incremental.returned_values.edges_added is not None
and replace_incremental.returned_values.edges_added
< result.returned_values.edges_added
)
assert replace_incremental.returned_values.edges_added > 0
self.check_cmd(["No errors!"], assert_loaded_saved_state=False)
def add_file_that_depends_on_class_a(self, filename: str) -> None:
with open(filename, "w") as f:
f.write(
"""<?hh // strict
class UsesAToo {
public function test() : int {
return A::foo();
}
}
"""
)
def change_return_type_on_base_class(
self, filename: str, type: str = "string", value: str = '"Hello"'
) -> None:
# Change the return type
with open(filename, "w") as f:
f.write(
"""<?hh // strict
class B {
public static function foo () : %s {
return %s;
}
}
"""
% (type, value)
)
|
the-stack_106_28183 |
from sqlalchemy.orm import sessionmaker
import requests
import os
import json
import models
import wingo_fiber
def save_appendix(model, item):
idx = model.id
# Init
directory = "results/{:}".format(idx)
if not os.path.exists(directory):
os.mkdir(directory)
# Download images
for i, url in enumerate(item['images']):
response = requests.get(url, stream=True)
ext = url.split(".")[-1]
with open(directory + '/img_{:}.{:}'.format(i, ext), 'wb') as f:
f.write(response.content)
del response
# Download appendix
for i, url in enumerate(item['appendix']):
ext = url.split(".")[-1]
        if ext != "pdf":
continue
response = requests.get(url, stream=True)
with open(directory + '/app_{:}.{:}'.format(i, ext), 'wb') as f:
f.write(response.content)
del response
return
class ApartmentPipeline(object):
def __init__(self):
engine = models.db_connect()
models.create_table(engine)
self.Session = sessionmaker(bind=engine)
def process_item(self, item, spider):
"""
This method is called for every returned item
"""
session = self.Session()
# is this a new apartment
exists = session.query(models.ApartmentModel).filter_by(url=item['url']).count() > 0
if exists:
return item
# convert from ApartmentItem to ApartmentModel
model = models.ApartmentModel()
model.url = item['url']
model.address = item['address']
model.rent = item['rent']
model.rooms = item['rooms']
model.livingspace = item['livingspace']
try:
session.add(model)
session.commit()
print(model)
except:
session.rollback()
raise
finally:
session.close()
return item
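# A hedged configuration sketch (illustrative only; the project module path is a
# placeholder): Scrapy only runs this pipeline if it is enabled in settings.py,
# e.g.
#   ITEM_PIPELINES = {"myproject.pipelines.ApartmentPipeline": 300}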
|
the-stack_106_28184 | import json
from django.test import override_settings
from allauth.account.models import EmailAddress
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.providers.amazon_cognito.provider import (
AmazonCognitoProvider,
)
from allauth.socialaccount.providers.amazon_cognito.views import (
AmazonCognitoOAuth2Adapter,
)
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
def _get_mocked_claims():
return {
"sub": "4993b410-8a1b-4c36-b843-a9c1a697e6b7",
"given_name": "John",
"family_name": "Doe",
"email": "[email protected]",
"username": "johndoe",
}
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"amazon_cognito": {"DOMAIN": "https://domain.auth.us-east-1.amazoncognito.com"}
}
)
class AmazonCognitoTestCase(OAuth2TestsMixin, TestCase):
provider_id = AmazonCognitoProvider.id
def get_mocked_response(self):
mocked_payload = json.dumps(_get_mocked_claims())
return MockedResponse(status_code=200, content=mocked_payload)
@override_settings(SOCIALACCOUNT_PROVIDERS={"amazon_cognito": {}})
def test_oauth2_adapter_raises_if_domain_settings_is_missing(
self,
):
mocked_response = self.get_mocked_response()
with self.assertRaises(
ValueError,
msg=AmazonCognitoOAuth2Adapter.DOMAIN_KEY_MISSING_ERROR,
):
self.login(mocked_response)
def test_saves_email_as_verified_if_email_is_verified_in_cognito(
self,
):
mocked_claims = _get_mocked_claims()
mocked_claims["email_verified"] = True
mocked_payload = json.dumps(mocked_claims)
mocked_response = MockedResponse(status_code=200, content=mocked_payload)
self.login(mocked_response)
user_id = SocialAccount.objects.get(uid=mocked_claims["sub"]).user_id
email_address = EmailAddress.objects.get(user_id=user_id)
self.assertEqual(email_address.email, mocked_claims["email"])
self.assertTrue(email_address.verified)
def test_provider_slug_replaces_underscores_with_hyphens(self):
self.assertTrue("_" not in self.provider.get_slug())
|
the-stack_106_28186 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.device import cuda
import paddle
import unittest
class TestCurrentStream(unittest.TestCase):
def test_current_stream(self):
if paddle.is_compiled_with_cuda():
s = cuda.current_stream()
self.assertTrue(isinstance(s, cuda.Stream))
s1 = cuda.current_stream(0)
self.assertTrue(isinstance(s1, cuda.Stream))
s2 = cuda.current_stream(paddle.CUDAPlace(0))
self.assertTrue(isinstance(s2, cuda.Stream))
self.assertEqual(s1, s2)
self.assertRaises(ValueError, cuda.current_stream, "gpu:0")
class TestSynchronize(unittest.TestCase):
def test_synchronize(self):
if paddle.is_compiled_with_cuda():
self.assertIsNone(cuda.synchronize())
self.assertIsNone(cuda.synchronize(0))
self.assertIsNone(cuda.synchronize(paddle.CUDAPlace(0)))
self.assertRaises(ValueError, cuda.synchronize, "gpu:0")
class TestCUDAStream(unittest.TestCase):
def test_cuda_stream(self):
if paddle.is_compiled_with_cuda():
s = paddle.device.cuda.Stream()
self.assertIsNotNone(s)
def test_cuda_stream_synchronize(self):
if paddle.is_compiled_with_cuda():
s = paddle.device.cuda.Stream()
e1 = paddle.device.cuda.Event(True, False, False)
e2 = paddle.device.cuda.Event(True, False, False)
e1.record(s)
e1.query()
tensor1 = paddle.to_tensor(paddle.rand([1000, 1000]))
tensor2 = paddle.matmul(tensor1, tensor1)
s.synchronize()
e2.record(s)
e2.synchronize()
self.assertTrue(s.query())
def test_cuda_stream_wait_event_and_record_event(self):
if paddle.is_compiled_with_cuda():
s1 = cuda.Stream(0)
tensor1 = paddle.to_tensor(paddle.rand([1000, 1000]))
tensor2 = paddle.matmul(tensor1, tensor1)
e1 = cuda.Event(False, False, False)
s1.record_event(e1)
s2 = cuda.Stream(0)
s2.wait_event(e1)
s2.synchronize()
self.assertTrue(e1.query() and s1.query() and s2.query())
class TestCUDAEvent(unittest.TestCase):
def test_cuda_event(self):
if paddle.is_compiled_with_cuda():
e = paddle.device.cuda.Event(True, False, False)
self.assertIsNotNone(e)
s = paddle.device.cuda.current_stream()
def test_cuda_event_methods(self):
if paddle.is_compiled_with_cuda():
e = paddle.device.cuda.Event(True, False, False)
s = paddle.device.cuda.current_stream()
event_query_1 = e.query()
tensor1 = paddle.to_tensor(paddle.rand([1000, 1000]))
tensor2 = paddle.matmul(tensor1, tensor1)
s.record_event(e)
e.synchronize()
event_query_2 = e.query()
self.assertTrue(event_query_1)
self.assertTrue(event_query_2)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_28187 | import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
# dilate=replace_stride_with_dilation[1])
# self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
# dilate=replace_stride_with_dilation[2])
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
# dilate=replace_stride_with_dilation[1])
# self.check_layer = [self.layer3]
# self.check_layer_2 = [self._make_layer(block, 256, layers[2], stride=2,
# dilate=replace_stride_with_dilation[1])]
self.layer3s = self._make_copied_layers(len(num_classes), block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4s = self._make_copied_layers(len(num_classes), block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpools = [nn.AdaptiveAvgPool2d((1, 1)) for num_class in num_classes]
self.fcs = [nn.Linear(512 * block.expansion, num_class) for num_class in num_classes]
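        # Note (added remark, not part of the original design): layer3s, layer4s,
        # avgpools and fcs above are plain Python lists, so their parameters are not
        # registered on this module (they won't appear in .parameters(), .to(device)
        # or state_dict()). A minimal sketch of one way to register them, if that
        # behaviour is wanted:
        #   self.layer3s = nn.ModuleList(self.layer3s)
        #   self.layer4s = nn.ModuleList(self.layer4s)
        #   self.avgpools = nn.ModuleList(self.avgpools)
        #   self.fcs = nn.ModuleList(self.fcs)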
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _make_copied_layers(self, count, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
branches = []
orig_in_planes = self.inplanes
for i in range(count):
self.inplanes = orig_in_planes
print('inplanes',self.inplanes)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
branch = nn.Sequential(*layers)
branches.append(branch)
return branches
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x_3 = [layer3(x) for layer3 in self.layer3s]
x_4 = [self.layer4s[i](x_3[i]) for i in range(len(x_3))]
x_pool = [self.avgpools[i](x_4[i]) for i in range(len(x_4))]
# for i in range(len(x_3)):
# layer4 = self.layer4s[i]
# x_4 =
# x_4 = [self.layer4(x)
# x_pool = [self.avgpool(x)]
x_flats = [torch.flatten(x_pool[i], 1) for i in range(len(x_pool))]
outs = [self.fcs[i](x_flats[i]) for i in range(len(x_flats))]
# outs = [fc_layer(x) for fc_layer in self.fcs]
# x_1 = self.fc(x)
# x_2 = self.fc_2(x)
# x_3 = self.fc_3(x)
return outs
def forward(self, x):
return self._forward_impl(x)
def get_activations(self, x, layer_name):
activations_dict = {}
x = self.conv1(x)
if layer_name == 'conv1':
activations_dict['trunk_conv1'] = x
x = self.bn1(x)
if layer_name == 'bn1':
activations_dict['trunk_bn1'] = x
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if layer_name == 'layer1':
activations_dict['trunk_layer1'] = x
x = self.layer2(x)
if layer_name == 'layer2':
activations_dict['trunk_layer2'] = x
x_3 = [layer3(x) for layer3 in self.layer3s]
if layer_name == 'layer3':
for i in range(len(x_3)):
activations_dict['branch_%s_layer3'%i] = x_3[i]
x_4 = [self.layer4s[i](x_3[i]) for i in range(len(x_3))]
if layer_name == 'layer4':
for i in range(len(x_4)):
activations_dict['branch_%s_layer4'%i] = x_4[i]
x_pool = [self.avgpools[i](x_4[i]) for i in range(len(x_4))]
if layer_name == 'avgpool':
for i in range(len(x_pool)):
activations_dict['branch_%s_avgpool'%i] = x_pool[i]
x_flats = [torch.flatten(x_pool[i], 1) for i in range(len(x_pool))]
if layer_name == 'flatten':
for i in range(len(x_flats)):
                activations_dict['branch_%s_flatten'%i] = x_flats[i]
outs = [self.fcs[i](x_flats[i]) for i in range(len(x_flats))]
if layer_name == 'fcs':
for i in range(len(outs)):
activations_dict['branch_%s_fcs'%i] = outs[i]
if len(activations_dict.keys()) == 0:
print('layer_name not valid, please check.')
sys.exit()
return activations_dict
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
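# Illustrative usage sketch (the branch class counts are assumptions for demonstration;
# num_classes is expected to be a list with one entry per output branch):
#   model = resnet18(num_classes=[10, 5])
#   x = torch.randn(2, 3, 224, 224)
#   outs = model(x)   # list of two tensors with shapes [2, 10] and [2, 5]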
|
the-stack_106_28190 | """Moderate Ray Tune run (32 trials, 4 actors).
This training run will start 32 Ray Tune trials, each starting 4 actors.
The cluster comprises 32 nodes.
Test owner: krfricke
Acceptance criteria: Should run through and report final results, as well
as the Ray Tune results table. No trials should error. All trials should
run in parallel.
"""
from collections import Counter
import json
import os
import time
import ray
from ray import tune
from xgboost_ray import RayParams
from ray.util.xgboost.release_test_util import train_ray
def train_wrapper(config, ray_params):
train_ray(
path="/data/classification.parquet",
num_workers=None,
num_boost_rounds=100,
num_files=64,
regression=False,
use_gpu=False,
ray_params=ray_params,
xgboost_params=config,
)
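# Descriptive note (not from the original file): tune.with_parameters(...) below binds
# the fixed ray_params argument, while `config` is filled per trial from the search
# space, so each trial effectively calls train_wrapper(config, ray_params=ray_params).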
if __name__ == "__main__":
search_space = {
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9),
}
ray.init(address="auto")
ray_params = RayParams(
elastic_training=False,
max_actor_restarts=2,
num_actors=4,
cpus_per_actor=1,
gpus_per_actor=0,
)
start = time.time()
analysis = tune.run(
tune.with_parameters(train_wrapper, ray_params=ray_params),
config=search_space,
num_samples=32,
resources_per_trial=ray_params.get_tune_resources(),
)
taken = time.time() - start
result = {
"time_taken": taken,
"trial_states": dict(Counter([trial.status for trial in analysis.trials])),
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON", "/tmp/tune_32x4.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
print("PASSED.")
|
the-stack_106_28191 | import argparse
import os
import pickle as pkl
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import skimage
import skimage.io
from skimage import feature
from sklearn import svm, metrics
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils import Bunch
def lbp_describe(image, p, r, eps=1e-7, vis=False):
lbp = feature.local_binary_pattern(image, p, r, method='uniform')
hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, p + 3), range=(0, p + 2))
hist = hist.astype("float")
hist /= (hist.sum() + eps)
return (hist, lbp / (p + 2) * 255) if vis else hist
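# Illustrative usage sketch (the image path is hypothetical): concatenating the four
# (p, r) settings used below gives a feature of length (8+2)+(16+2)+(24+2)+(24+2) = 80,
# which is what the cached 'lbp_feature_80.pkl' files store per image:
#   gray = cv2.cvtColor(skimage.io.imread('sample.jpg'), cv2.COLOR_BGR2GRAY)
#   feat = np.concatenate([lbp_describe(gray, p, r) for p, r in [(8, 1), (16, 2), (24, 3), (24, 4)]])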
# noinspection DuplicatedCode
def prepare_image_feature(container_path, dimension=(64, 64), re_gen=False, use_existed=False):
image_dir = Path(container_path) # /train or /test
# folders = [directory for directory in image_dir.iterdir() if
    #            directory.is_dir()]  # folder list: [/RecapturedImage, /SingleCapturedImage]
# TODO: Change name here
class_array = ['RecapturedImage', 'SingleCapturedImage']
folders = []
for cls in class_array:
p = image_dir.joinpath(cls)
folders.append(p)
    categories = [fo.name for fo in folders] # folder name list: ['Recapture..', 'SingleCapture..']
descr = "A image classification dataset"
lbp_param = [(8, 1), (16, 2), (24, 3), (24, 4)]
images = []
flat_data = []
lbp_data = []
lbp_hist_data = []
target = []
file_names = []
file = Path()
    for label, direc in enumerate(folders): # label 0/1 is the folder index; direc is the folder: /RecapturedImage /Single..
        lbp_file_path = image_dir.joinpath(direc, 'lbp_feature_80.pkl') # cached pkl file of per-image LBP features
if lbp_file_path.exists() and lbp_file_path.stat().st_size > 0:
with open(lbp_file_path, 'rb') as cache_file:
lbp_dic = pkl.load(cache_file)
else:
lbp_dic = dict()
dir_stack = [direc]
while len(dir_stack):
            cur_dir = dir_stack.pop() # current folder
print(f'iter into {cur_dir.name}')
for j, dir in enumerate(cur_dir.iterdir()):
                if dir.is_dir(): # /recapture may contain nested sub-folders
if dir.name != 'archive':
print(f'find folder {dir.name}')
dir_stack.append(dir)
continue
                elif dir.is_file(): # found a file; check whether it is an image
file = dir
if file.suffix not in ('.JPG', '.jpg', '.png', '.PNG'):
print(f'skip non image file {file}')
continue
                    if file in lbp_dic and not re_gen: # the cached lbp_dic already has this image's LBP; append it to lbp_hist_data and skip to the next image
lbp_hist_data.append(lbp_dic[file])
target.append(label)
file_names.append(file)
continue
if use_existed:
continue
print(f'start reading {j}: {file}')
image = skimage.io.imread(file)
if len(image) == 2:
image = image[0]
print(image.shape)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    lbp_feature = np.concatenate([lbp_describe(gray, p, r) for p, r in lbp_param]) # LBP feature of the current image
                    lbp_dic[file] = lbp_feature # lbp_dic is keyed by file path
print(f'dumping...')
with open(lbp_file_path, 'wb') as cache_file:
pkl.dump(lbp_dic, cache_file)
print(f'lbp feature: {len(lbp_feature)}: {lbp_feature}')
# img_resized = resize(image, dimension, anti_aliasing=True, mode='reflect')
# print(f'size after resize {img_resized.shape}')
# flat_data.append(img_resized.flatten())
# images.append(img_resized)
                    lbp_hist_data.append(lbp_feature) # lbp_hist_data is not keyed by file (kept parallel to file_names)
target.append(label)
file_names.append(file)
lbp_hist_data = np.array(lbp_hist_data)
target = np.array(target)
# flat_data = np.array(flat_data)
# images = np.array(images)
return Bunch(
lbp_hist_data=lbp_hist_data,
target=target,
target_names=categories,
file_list=file_names,
# images=images,
DESCR=descr)
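# Illustrative usage (the path below is hypothetical): the returned Bunch exposes
# lbp_hist_data of shape (num_images, 80) plus the parallel target and file_list entries:
#   ds = prepare_image_feature('/path/to/train')
#   print(ds.lbp_hist_data.shape, ds.target_names)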
def vis_one(img_path, model_path, log_lbp=False):
img_path = Path(img_path)
file_name = img_path.stem
base_path = img_path.parent.parent.parent
with open(model_path, 'rb') as f:
clf = pkl.load(f)
print(f'processing image {img_path}')
image = skimage.io.imread(img_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
lbp_param = [(8, 1), (16, 2), (24, 3), (24, 4)]
lbp_image_pair = [lbp_describe(gray, p, r, vis=True) for p, r in lbp_param]
lbp_img_list = list(zip(*lbp_image_pair))
lbp_feature = np.concatenate(lbp_img_list[0])
if log_lbp:
with open(f'{base_path}/vis_lbp_hist.txt', 'a') as f:
f.write(f'{img_path} {lbp_feature}\n')
for i, lbp_img in enumerate(lbp_img_list[1]):
write_path = os.path.join(base_path, 'res', 'vis', f'{file_name}_lbp{str(lbp_param[i])}.png')
print(f'writing {i}th LBP map to {write_path}')
cv2.imwrite(write_path, lbp_img)
preds = clf.predict(np.array([lbp_feature]))
print(f'Predicted class: {class_array[int(preds)]}')
return preds
def clear_folder(path):
if path.exists():
os.system(f'rm -r {path}')
os.makedirs(path)
# noinspection DuplicatedCode
def test_all(test_path, model_path):
test_path = Path(test_path)
    res_path = test_path.parent.joinpath('res') # the res folder holds images sorted by the SVM's predicted class
# TODO: Change name here
class_array = ['RecapturedImage', 'SingleCapturedImage']
dst_paths = []
    for cls in class_array: # this loop only clears the output directories from the previous prediction run
path = res_path.joinpath(cls)
clear_folder(path)
dst_paths.append(path)
with open(model_path, 'rb') as f:
        clf = pkl.load(f) # clf is the trained SVM model
test_dataset = prepare_image_feature(test_path)
    preds = clf.predict(test_dataset.lbp_hist_data) # predicted classes for all images in the test folder
print(preds)
    print(test_dataset.target) # ground-truth classes for all images in the test folder --- [0 0 0 0 0 ... 1 1 1 1 1 ...]
print(test_dataset.target_names) # ['RecapturedImage', 'SingleCapturedImage']
print(test_dataset.file_list)
print(f'Pred: GroundTruth: filepath')
for pred_label, name, gnd in zip(preds, test_dataset.file_list, test_dataset.target):
        print(f'{pred_label}:\t\t{gnd}:\t\t{name}') # predicted class, actual class, file name
# os.system(f'cp "{name}" "{dst_paths[pred_label]}/"')
print(
f"Classification report - \n{metrics.classification_report(test_dataset.target, preds, target_names=class_array)}\n")
print("Confusion matrix -\n")
print(pd.crosstab(pd.Series(test_dataset.target, name='Actual'), pd.Series(preds, name='Predicted')))
return preds
def parse_args():
parser = argparse.ArgumentParser(
description='Train a network with Detectron'
)
parser.add_argument(
'--testone',
dest='test_img_path',
help='image path for test',
default='',
type=str
)
parser.add_argument(
'--testall',
dest='test_all',
help='test all images',
action='store_true'
)
parser.add_argument(
'--retrain',
dest='retrain',
help='retrain the model',
action='store_true'
)
return parser.parse_args()
if __name__ == '__main__':
data_path = '/home/lyf/dataset/train'
test_data_path = '/home/lyf/dataset/test'
# TODO: Change svm model name
model_path = './svm_lbp.pkl'
# TODO: Change name here
class_array = ['RecapturedImage', 'SingleCapturedImage']
retrain = True
args = parse_args()
# noinspection DuplicatedCode
if args.retrain:
image_dataset = prepare_image_feature(data_path)
X_train, X_val, y_train, y_val = train_test_split(image_dataset.lbp_hist_data, image_dataset.target,
test_size=0.3)
c_range = np.logspace(-5, 15, 11, base=2)
gamma_range = np.logspace(-9, 3, 13, base=2)
param_grid = [{'kernel': ['rbf'], 'C': c_range, 'gamma': gamma_range}]
svc = svm.SVC(kernel='rbf', class_weight='balanced')
grid = GridSearchCV(svc, param_grid, n_jobs=-1, cv=3)
clf = grid.fit(X_train, y_train)
with open(model_path, 'wb') as f:
pkl.dump(clf, f)
y_pred = clf.predict(X_val)
print(
f"Classification report - \n{clf}:\n{metrics.classification_report(y_val, y_pred, target_names=class_array)}\n")
print("Confusion matrix -\n")
print(pd.crosstab(pd.Series(y_val, name='Actual'), pd.Series(y_pred, name='Predicted')))
if os.path.exists(model_path):
if args.test_img_path != '':
# vis_one(os.path.join(test_data_path, 'RecapturedImages', '7 (1).jpg'), model_path)
vis_one(args.test_img_path, model_path)
elif args.test_all:
preds = test_all(test_data_path, model_path)
|
the-stack_106_28192 | # import random module
import random
# compliment list
compliments = ['Amazing!', 'Great Work!', 'Fantastic!',
"Incredible!", "Nice Job!", "Excellent!", "Stupendous!"]
# global score var
# this will be useful when we use it in functions.
score = 0
# main function
def quizQuestion(guess, answer):
    # here is where we put the global score in place
    global score
    # a still-guessing flag to track whether the player is still trying.
    stillGuessing = True
    # attempt counter, user will have 3 attempts to solve each question.
    attempt = 0
    # check
    # while the user is still guessing and fewer than 3 attempts have been used, run the check logic.
    while stillGuessing and attempt < 3:
        # ignore case so the comparison is forgiving and the UI friendlier
        if guess.lower() == answer.lower():
            # if the guess is correct, print out a compliment and add 1 to the score var
            print(f"{random.choice(compliments)}")
            score += 1
            stillGuessing = False
        else:
            # add one to the attempt counter
            attempt = attempt + 1
            if attempt < 3:
                # show a motivational message to try again, and how many attempts are remaining
                guess = input(
                    f"Try Again! I know you can do it! \n Number of guesses remaining --> {3 - attempt} \n")
    # if all 3 attempts are used up, show the correct answer
    if attempt == 3:
        print(
            f"Oops, looks like all your attempts are gone! The correct answer was {answer}")
# question creation begins here
# first question
question1 = input('What is the biggest country in the world? \n')
quizQuestion(question1, 'Russia')
# second question
question2 = input('Which country makes the most coffee in the world? \n')
quizQuestion(question2, 'Brazil')
# print score, just for fun
outroMessage = f'Your score is... --> {score}'
print(outroMessage)
|
the-stack_106_28193 | import collections
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.query_utils import Q, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r'^(.*)\s(?:ASC|DESC).*',
re.MULTILINE | re.DOTALL,
)
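    # Illustrative example (added remark): for a clause like '"app_table"."name" DESC',
    # ordering_parts.search(...)[1] returns '"app_table"."name"', i.e. the expression
    # with its ASC/DESC direction stripped.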
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {
expr.source for expr in expressions if isinstance(expr, Ref)
}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
# Skip References to the select clause, as all expressions in the
# select clause are already part of the group by.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or (
getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if (
hasattr(expr, 'target') and
expr.target.primary_key and
self.connection.features.allows_group_by_selected_pks_on_model(expr.target.model)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
else:
sql, params = col.select_format(self, sql, params)
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif self.query.get_meta().ordering:
ordering = self.query.get_meta().ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
order_by.append((OrderBy(expr, descending=descending), False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query.extra or col not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
# Add column used in ORDER BY clause without an alias to
# the selected columns.
self.query.add_select_col(src)
resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
self.query.external_aliases.get(name) and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values((
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
))
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = 'SELECT * FROM ({})'.format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif not features.supports_slicing_ordering_in_compound:
part_sql = '({})'.format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [', '.join(out_cols), 'FROM', *from_]
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
elif no_key and not self.connection.features.has_select_for_no_key_update:
raise NotSupportedError(
'FOR NO KEY UPDATE is not supported on this '
'database backend.'
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
order_by = order_by or self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if self.query.explain_query:
result.insert(0, self.connection.ops.explain_query_prefix(
self.query.explain_format,
**self.query.explain_options
))
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limit_offset:
result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = 'col%d' % index
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), tuple(sub_params + params)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Return a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, return a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation and
opts.ordering and
getattr(field, 'attname', None) != pieces[-1] and
name != 'pk'
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append((item, False))
continue
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
get_ordering() and get_distinct() must produce same target columns on
same input, as the prefixes of get_ordering() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': partial(remote_setter, name),
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info['model']._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
'model': parent_model,
'field': parent_link,
'reverse': False,
'select_fields': [
select_index
for select_index in klass_info['select_fields']
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model or
self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info['model']._meta.concrete_model
for select_index in klass_info['select_fields']:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == 'self':
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get('related_klass_infos', []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
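    # Illustrative sketch (model and alias names are assumptions, not documented
    # output): for
    #   Book.objects.select_related('author').select_for_update(of=('self', 'author'))
    # 'self' resolves to the root klass_info and 'author' to its related
    # klass_info, and the result is a list such as ['"book"', '"author"'] of
    # quoted table aliases (or fully compiled columns on backends where
    # select_for_update_of_column is True).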
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
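    # Rough illustration (converter and index are assumptions): with
    #   converters == {2: ([to_bool], expression)}
    # every yielded row has row[2] replaced by to_bool(row[2], expression,
    # connection) before the caller sees it.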
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
RawSQL('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
for row in result[0]:
if not isinstance(row, str):
yield ' '.join(str(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = tuple()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
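    # Hedged examples of the three branches above (values are illustrative only):
    #   field is None, val == "some raw SQL"       ->  ("some raw SQL", [])
    #   val has as_sql() (e.g. an F() expression)  ->  (compiled SQL, its params)
    #   plain value, val == 42                     ->  ("%s", [42])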
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError(
'Aggregate functions are not allowed in this query '
'(%s=%r).' % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
'Window expressions are not allowed in this query (%s=%r).'
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
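    # Shape sketch for two objects and two plain fields (assumed values):
    #   value_rows       == [[1, 'a'], [2, 'b']]
    #   placeholder_rows == (('%s', '%s'), ('%s', '%s'))
    #   param_rows       == [[1, 'a'], [2, 'b']]
    # A field with get_placeholder() may contribute several '%s' in one cell; its
    # params are then flattened into the same parameter row.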
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts)
result = ['%s %s' % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.returning_fields and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql(
ignore_conflicts=self.query.ignore_conflicts
)
if self.returning_fields and self.connection.features.can_return_columns_from_insert:
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(self.returning_fields)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields and len(self.query.objs) != 1 and
not self.connection.features.can_return_rows_from_bulk_insert
)
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_rows(cursor)
if self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
return [self.connection.ops.fetch_returned_insert_columns(cursor, self.returning_params)]
return [(self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
),)]
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
def _as_sql(self, query):
result = [
'DELETE FROM %s' % self.quote_name_unless_alias(query.base_table)
]
where, params = self.compile(query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [
pk.get_col(self.query.get_initial_alias())
]
outerq = Query(self.query.model)
outerq.where = self.query.where_class()
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL('SELECT * FROM (%s) subquery' % sql, params)
outerq.add_q(Q(pk__in=innerq))
return self._as_sql(outerq)
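    # Sketch of the multi-table fallback (illustrative, backend-dependent SQL):
    #   DELETE FROM entry WHERE id IN (SELECT id FROM entry INNER JOIN ... WHERE ...)
    # When update_can_self_select is False (e.g. MySQL), the inner query is first
    # materialized as SELECT * FROM (...) subquery so the target table can be
    # referenced.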
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError(
'Aggregate functions are not allowed in this query '
'(%s=%r).' % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
'Window expressions are not allowed in this query '
'(%s=%r).' % (field.name, val)
)
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
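    # Illustration (model names are assumed, not from the Django docs): for
    #   Entry.objects.filter(blog__name='x').update(rating=5)
    # the Blog join cannot appear in a portable UPDATE statement, so the matching
    # pks are selected here first and the final statement is roughly
    #   UPDATE entry SET rating = 5 WHERE id IN (<pre-selected ids or subquery>)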
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
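# Usage sketch (mirrors the call in SQLCompiler.execute_sql above; the consumer
# shown here is only illustrative):
#   for chunk in cursor_iter(cursor, connection.features.empty_fetchmany_value,
#                            col_count=None, itersize=GET_ITERATOR_CHUNK_SIZE):
#       for row in chunk:
#           process(row)  # hypothetical consumer
# fetchmany() keeps being called until it returns the backend's sentinel
# (typically an empty list), after which the finally clause closes the cursor.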
the-stack_106_28195 | # -*- coding: utf-8 -*-
from pip_services3_commons.errors.UnauthorizedException import UnauthorizedException
from pip_services3_rpc.services.HttpResponseSender import HttpResponseSender
class BasicAuthorizer:
def anybody(self):
return lambda req, res, next: next()
def signed(self):
def inner(req, res, next):
if req.user is None:
HttpResponseSender.send_error(UnauthorizedException(
None,
'NOT_SIGNED',
'User must be signed in to perform this operation '
).with_status(401))
else:
next()
return inner
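# Minimal usage sketch (req, res and next are placeholders for whatever the
# surrounding REST service passes in; they are not defined in this module):
#
#   authorize = BasicAuthorizer().signed()
#   authorize(req, res, next)  # calls next() when req.user is set, otherwise
#                              # sends a 401 UnauthorizedException via HttpResponseSender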
the-stack_106_28196 | #!/usr/bin/env python3
# cabal_wrapper.py <FILE.JSON>
#
# This wrapper calls Cabal's configure/build/install steps one big
# action so that we don't have to track all inputs explicitly between
# steps. It receives the path to a json file with the following schema:
#
# { "component": string # Cabal component to build.
# , "pkg_name": string # Package ID of the resulting package.
# , "generate_haddock": boolean # Whether to generate haddock documentation.
# , "setup_path": string # Path to Setup.hs
# , "pkg_dir": string # Directory containing the Cabal file
# , "package_db_path": string # Output package DB path.
# , "runghc_args": list of string # Arguments for runghc
# , "extra_args": list of string # Additional args to Setup.hs configure.
# , "path_args": list of string # Additional args to Setup.hs configure where paths need to be prefixed with execroot.
# , "toolchain_info" :
# { "ghc": string # path to ghc
# , "ghc_pkg": string # path to ghc_pkg
# , "runghc": string # path to runghc
# , "ar": string # path to ar
# , "cc": string # path to cc
# , "strip": string # path to strip
# , "is_windows": boolean # this is a windows build
# , "workspace": string # workspace name
# , "ghc_cc_args": list of string # cc flags for ghc
# }
# }
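#
# A minimal illustrative input file (every value below is made up):
#
# { "component": "lib:mylib"
# , "pkg_name": "mylib-0.1.0.0"
# , "generate_haddock": false
# , "setup_path": "mylib/Setup.hs"
# , "pkg_dir": "mylib"
# , "package_db_path": "bazel-out/bin/mylib/package.conf.d"
# , "runghc_args": []
# , "extra_args": []
# , "path_args": []
# , "toolchain_info":
#     { "ghc": "ghc/bin/ghc", "ghc_pkg": "ghc/bin/ghc-pkg", "runghc": "ghc/bin/runghc"
#     , "ar": "/usr/bin/ar", "cc": "/usr/bin/cc", "strip": "/usr/bin/strip"
#     , "is_windows": false, "workspace": "rules_haskell", "ghc_cc_args": []
#     }
# }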
from __future__ import print_function
from contextlib import contextmanager
from glob import glob
import json
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
debug = False
verbose = os.environ.get("CABAL_VERBOSE", "") == "True"
with open(sys.argv.pop(1)) as json_file:
json_args = json.load(json_file)
toolchain_info = json_args["toolchain_info"]
is_windows = toolchain_info["is_windows"]
def run(cmd, *args, **kwargs):
if debug:
print("+ " + " ".join(["'{}'".format(arg) for arg in cmd]), file=sys.stderr)
sys.stderr.flush()
if verbose:
subprocess.run(cmd, check=True, *args, **kwargs)
else:
try:
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, *args, **kwargs)
except subprocess.CalledProcessError as err:
sys.stdout.buffer.write(err.stdout)
sys.stderr.buffer.write(err.stderr)
raise
def find_exe(exe):
if os.path.isfile(exe):
path = os.path.abspath(exe)
elif is_windows and os.path.isfile(exe + ".exe"):
path = os.path.abspath(exe + ".exe")
else:
path = toolchain_info["workspace"] + "/" + exe
if not os.path.isfile(path) and is_windows:
path = toolchain_info["workspace"] + "/" + exe + ".exe"
return path
path_list_sep = ";" if is_windows else ":"
def canonicalize_path(path):
return path_list_sep.join([
os.path.abspath(entry)
for entry in path.split(path_list_sep)
if entry != ""
])
# Remove any relative entries, because we'll be changing CWD shortly.
os.environ["LD_LIBRARY_PATH"] = canonicalize_path(os.getenv("LD_LIBRARY_PATH", ""))
os.environ["LIBRARY_PATH"] = canonicalize_path(os.getenv("LIBRARY_PATH", ""))
os.environ["PATH"] = canonicalize_path(os.getenv("PATH", ""))
component = json_args["component"]
name = json_args["pkg_name"]
haddock = json_args["generate_haddock"]
execroot = os.getcwd()
setup = os.path.join(execroot, json_args["setup_path"])
srcdir = os.path.join(execroot, json_args["pkg_dir"])
# By definition (see ghc-pkg source code).
pkgroot = os.path.realpath(os.path.join(execroot, os.path.dirname(json_args["package_db_path"])))
libdir = os.path.join(pkgroot, "{}_iface".format(name))
dynlibdir = os.path.join(pkgroot, "lib")
bindir = os.path.join(pkgroot, "bin")
datadir = os.path.join(pkgroot, "{}_data".format(name))
package_database = os.path.join(pkgroot, "{}.conf.d".format(name))
haddockdir = os.path.join(pkgroot, "{}_haddock".format(name))
htmldir = os.path.join(pkgroot, "{}_haddock_html".format(name))
runghc_args = json_args["runghc_args"]
runghc = find_exe(toolchain_info["runghc"])
ghc = find_exe(toolchain_info["ghc"])
ghc_pkg = find_exe(toolchain_info["ghc_pkg"])
extra_args = json_args["extra_args"]
path_args = json_args["path_args"]
ar = find_exe(toolchain_info["ar"])
cc = find_exe(toolchain_info["cc"])
strip = find_exe(toolchain_info["strip"])
def recache_db():
run([ghc_pkg, "recache", "--package-db=" + package_database])
recache_db()
@contextmanager
def tmpdir():
"""This is a reimplementation of `tempfile.TemporaryDirectory` because
the latter isn't available in python2
"""
# Build into a sibling path of the final binary output location.
# This is to ensure that relative `RUNPATH`s are valid in the intermediate
# output in the `--builddir` as well as in the final output in `--bindir`.
# Executables are placed into `<distdir>/build/<package-name>/<binary>`.
# Libraries are placed into `<distdir>/build/<library>`. I.e. there is an
# extra subdirectory for libraries.
#
# On Windows we don't do dynamic linking and prefer shorter paths to avoid
# exceeding `MAX_PATH`.
if is_windows:
distdir = tempfile.mkdtemp()
else:
if component.startswith("exe:"):
distdir = tempfile.mkdtemp(dir=os.path.dirname(os.path.dirname(pkgroot)))
else:
distdir = tempfile.mkdtemp(dir=os.path.dirname(pkgroot))
try:
yield distdir
finally:
shutil.rmtree(distdir, ignore_errors = True)
with tmpdir() as distdir:
enable_relocatable_flags = ["--enable-relocatable"] \
if not is_windows else []
    # Cabal really wants the current working directory to be the directory
    # where the .cabal file is located. So we have no choice but to cd
    # into it, but then we have to rewrite all relative references into
# absolute ones before doing so (using $execroot).
old_cwd = os.getcwd()
os.chdir(srcdir)
os.putenv("RULES_HASKELL_EXEC_ROOT", old_cwd)
os.putenv("HOME", "/var/empty")
os.putenv("TMPDIR", os.path.join(distdir, "tmp"))
os.putenv("TMP", os.path.join(distdir, "tmp"))
os.putenv("TEMP", os.path.join(distdir, "tmp"))
os.makedirs(os.path.join(distdir, "tmp"))
# XXX: Bazel hack
# When cabal_wrapper calls other tools with runfiles, the runfiles are
# searched in the runfile tree of cabal_wrapper unless we clear
# RUNFILES env vars. After clearing the env vars, each tool looks for
# runfiles in its own runfiles tree.
#
# Clearing RUNFILES_DIR is necessary in macos where a wrapper script
# cc-wrapper.sh is used from the cc toolchain.
#
# Clearing RUNFILES_MANIFEST_FILE is necessary in windows where we
# use a wrapper script cc-wrapper-bash.exe which has a different
# manifest file than cabal_wrapper.py.
if "RUNFILES_DIR" in os.environ:
del os.environ["RUNFILES_DIR"]
if "RUNFILES_MANIFEST_FILE" in os.environ:
del os.environ["RUNFILES_MANIFEST_FILE"]
runghc_args = [arg.replace("./", execroot + "/") for arg in runghc_args]
run([runghc] + runghc_args + [setup, "configure", \
component, \
"--verbose=0", \
"--user", \
"--with-compiler=" + ghc,
"--with-hc-pkg=" + ghc_pkg,
"--with-ar=" + ar,
"--with-gcc=" + cc,
"--with-strip=" + strip,
"--enable-deterministic", \
] +
[ "--ghc-option=-optP=-Wno-trigraphs" ] +
[ "--ghc-option=" + flag.replace("$CC", cc) for flag in toolchain_info["ghc_cc_args"] ] +
enable_relocatable_flags + \
[ \
# Make `--builddir` a relative path. Using an absolute path would
# confuse the `RUNPATH` patching logic in `cc_wrapper`. It assumes that
            # absolute paths refer to the temporary directory that GHC uses for
            # intermediate template Haskell outputs. `cc_wrapper` should be improved
# in that regard.
"--builddir=" + (os.path.relpath(distdir) if not is_windows else distdir), \
"--prefix=" + pkgroot, \
"--libdir=" + libdir, \
"--dynlibdir=" + dynlibdir, \
"--libsubdir=", \
"--bindir=" + bindir, \
"--datadir=" + datadir, \
# Note, setting --datasubdir is required to work around
# https://github.com/haskell/cabal/issues/6235
"--datasubdir=", \
"--haddockdir=" + haddockdir, \
"--htmldir=" + htmldir, \
"--package-db=clear", \
"--package-db=global", \
] + \
extra_args + \
[ arg.replace("=", "=" + execroot + "/") for arg in path_args ] + \
[ "--package-db=" + package_database ], # This arg must come last.
)
run([runghc] + runghc_args + [setup, "build", "--verbose=0", "--builddir=" + distdir])
if haddock:
run([runghc] + runghc_args + [setup, "haddock", "--verbose=0", "--builddir=" + distdir])
run([runghc] + runghc_args + [setup, "install", "--verbose=0", "--builddir=" + distdir])
# Bazel builds are not sandboxed on Windows and can be non-sandboxed on
# other OSs. Operations like executing `configure` scripts can modify the
# source tree. If the `srcs` attribute uses a glob like `glob(["**"])`,
# then these modified files will enter `srcs` on the next execution and
# invalidate the cache. To avoid this we remove generated files.
run([runghc] + runghc_args + [setup, "clean", "--verbose=0", "--builddir=" + distdir])
os.chdir(old_cwd)
# XXX Cabal has a bizarre layout that we can't control directly. It
# confounds the library-dir and the import-dir (but not the
# dynamic-library-dir). That's pretty annoying, because Bazel won't
# allow overlap in the path to the interface files directory and the
# path to the static library. So we move the static library elsewhere
# and patch the .conf file accordingly.
#
# There were plans for controlling this, but they died. See:
# https://github.com/haskell/cabal/pull/3982#issuecomment-254038734
libraries=glob(os.path.join(libdir, "libHS*.a"))
package_conf_file = os.path.join(package_database, name + ".conf")
def make_relocatable_paths(line):
line = re.sub("library-dirs:.*", "library-dirs: ${pkgroot}/lib", line)
def make_relative_to_pkgroot(matchobj):
abspath=matchobj.group(0)
return os.path.join("${pkgroot}", os.path.relpath(abspath, start=pkgroot))
# The $execroot is an absolute path and should not leak into the output.
    # Replace each occurrence of execroot with a path relative to ${pkgroot}.
    line = re.sub(re.escape(execroot) + r'\S*', make_relative_to_pkgroot, line)
return line
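# Illustrative transformation (paths are hypothetical): a registration line like
#   library-dirs: /abs/execroot/ws/bazel-out/bin/mylib_iface
# is rewritten to
#   library-dirs: ${pkgroot}/lib
# and every other occurrence of the execroot prefix is replaced by a path
# relative to ${pkgroot}, keeping the package registration relocatable.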
if libraries != [] and os.path.isfile(package_conf_file):
for lib in libraries:
os.rename(lib, os.path.join(dynlibdir, os.path.basename(lib)))
tmp_package_conf_file = package_conf_file + ".tmp"
with open(package_conf_file, 'r', errors='surrogateescape') as package_conf:
with open(tmp_package_conf_file, 'w', errors='surrogateescape') as tmp_package_conf:
for line in package_conf.readlines():
print(make_relocatable_paths(line), file=tmp_package_conf)
os.remove(package_conf_file)
os.rename(tmp_package_conf_file, package_conf_file)
recache_db()
the-stack_106_28197 | import logging
import json
from libs import baseview, util, rollback
from rest_framework.response import Response
from django.http import HttpResponse
from core.models import SqlOrder, SqlRecord
from libs.serializers import Record
CUSTOM_ERROR = logging.getLogger('Yearning.core.views')
class record_order(baseview.SuperUserpermissions):
'''
    :argument API endpoint for listing executed SQL order records
    :return the records and the total record count
'''
def get(self, request, args=None):
try:
page = request.GET.get('page')
username = request.GET.get('username')
except KeyError as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
return HttpResponse(status=500)
else:
try:
pagenumber = SqlOrder.objects.filter(status=1).all().values('id')
pagenumber.query.distinct = ['id']
start = int(page) * 10 - 10
end = int(page) * 10
sql = SqlOrder.objects.raw(
'''
select core_sqlorder.*,core_databaselist.connection_name, \
core_databaselist.computer_room from core_sqlorder \
INNER JOIN core_databaselist on \
core_sqlorder.bundle_id = core_databaselist.id where core_sqlorder.status = 1 and core_sqlorder.assigned = '%s'\
ORDER BY core_sqlorder.id desc
'''%username
)[start:end]
data = util.ser(sql)
return Response({'data': data, 'page': len(pagenumber)})
except Exception as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
return HttpResponse(status=500)
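# Response shape of record_order.get (an illustration pieced together from the
# raw query above; the exact keys depend on the SqlOrder model):
#   {"data": [{"connection_name": ..., "computer_room": ..., <SqlOrder fields>}, ...],
#    "page": <total count of executed (status=1) orders>}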
class order_detail(baseview.BaseView):
'''
    :argument API endpoint for the detail information of an executed work order
'''
def get(self, request, args: str = None):
'''
        :argument detail data display
        :param args: look up and return the related data based on the given work_id, status and order_id
:return:
'''
try:
work_id = request.GET.get('workid')
status = request.GET.get('status')
order_id = request.GET.get('id')
except KeyError as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
else:
type_id = SqlOrder.objects.filter(id=order_id).first()
try:
if status == '1':
data = SqlRecord.objects.filter(workid=work_id).all()
_serializers = Record(data, many=True)
return Response({'data':_serializers.data, 'type':type_id.type})
else:
data = SqlOrder.objects.filter(work_id=work_id).first()
_in = {'data':[{'sql': x} for x in data.sql.split(';')], 'type':type_id.type}
return Response(_in)
except Exception as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__} : {e}')
return HttpResponse(status=500)
def put(self, request, args: str = None):
'''
        :argument API endpoint for resubmitting a work order after it has been rejected
        :param args: return the rejected SQL corresponding to the given order_id
:return:
'''
try:
order_id = request.data['id']
except KeyError as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
else:
try:
info = SqlOrder.objects.raw(
"select core_sqlorder.*,core_databaselist.connection_name,\
core_databaselist.computer_room from core_sqlorder INNER JOIN \
core_databaselist on core_sqlorder.bundle_id = core_databaselist.id \
WHERE core_sqlorder.id = %s" % order_id)
data = util.ser(info)
sql = data[0]['sql'].split(';')
_tmp = ''
for i in sql:
_tmp += i + ";\n"
return Response({'data':data[0], 'sql':_tmp.strip('\n'), 'type': 0})
except Exception as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
return HttpResponse(status=500)
def post(self, request, args: str = None):
'''
        :argument API endpoint for rolling back SQL after a work order has been executed
        :param args: return the rollback SQL corresponding to the given order_id
:return: {'data': data[0], 'sql': rollback_sql, 'type': 1}
'''
try:
order_id = request.data['id']
info = list(set(json.loads(request.data['opid'])))
except KeyError as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
return HttpResponse(status=500)
else:
try:
sql = []
rollback_sql = []
for i in info:
info = SqlOrder.objects.raw(
"select core_sqlorder.*,core_databaselist.connection_name,\
core_databaselist.computer_room from core_sqlorder INNER JOIN \
core_databaselist on core_sqlorder.bundle_id = core_databaselist.id \
WHERE core_sqlorder.id = %s"
% order_id)
data = util.ser(info)
_data = SqlRecord.objects.filter(sequence=i).first()
roll = rollback.rollbackSQL(db=_data.backup_dbname, opid=i)
link = _data.backup_dbname + '.' + roll
sql.append(rollback.roll(backdb=link, opid=i))
for i in sql:
for c in i:
rollback_sql.append(c['rollback_statement'])
rollback_sql = sorted(rollback_sql)
if rollback_sql == []: return HttpResponse(status=500)
return Response({'data': data[0], 'sql': rollback_sql, 'type': 1})
except Exception as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
return HttpResponse(status=500) |
the-stack_106_28198 | """
Building and world design commands
"""
from builtins import range
import re
from django.conf import settings
from django.db.models import Q
from evennia.objects.models import ObjectDB
from evennia.locks.lockhandler import LockException
from evennia.commands.cmdhandler import get_and_merge_cmdsets
from evennia.utils import create, utils, search
from evennia.utils.utils import inherits_from, class_from_module, get_all_typeclasses
from evennia.utils.eveditor import EvEditor
from evennia.utils.evmore import EvMore
from evennia.prototypes import spawner, prototypes as protlib, menus as olc_menus
from evennia.utils.ansi import raw
COMMAND_DEFAULT_CLASS = class_from_module(settings.COMMAND_DEFAULT_CLASS)
# limit symbol import for API
__all__ = ("ObjManipCommand", "CmdSetObjAlias", "CmdCopy",
"CmdCpAttr", "CmdMvAttr", "CmdCreate",
"CmdDesc", "CmdDestroy", "CmdDig", "CmdTunnel", "CmdLink",
"CmdUnLink", "CmdSetHome", "CmdListCmdSets", "CmdName",
"CmdOpen", "CmdSetAttribute", "CmdTypeclass", "CmdWipe",
"CmdLock", "CmdExamine", "CmdFind", "CmdTeleport",
"CmdScript", "CmdTag", "CmdSpawn")
# used by @set
from ast import literal_eval as _LITERAL_EVAL
# used by @find
CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
ROOM_TYPECLASS = settings.BASE_ROOM_TYPECLASS
EXIT_TYPECLASS = settings.BASE_EXIT_TYPECLASS
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
_PROTOTYPE_PARENTS = None
class ObjManipCommand(COMMAND_DEFAULT_CLASS):
"""
This is a parent class for some of the defining objmanip commands
since they tend to have some more variables to define new objects.
Each object definition can have several components. First is
always a name, followed by an optional alias list and finally an
some optional data, such as a typeclass or a location. A comma ','
separates different objects. Like this:
name1;alias;alias;alias:option, name2;alias;alias ...
Spaces between all components are stripped.
A second situation is attribute manipulation. Such commands
are simpler and offer combinations
objname/attr/attr/attr, objname/attr, ...
"""
# OBS - this is just a parent - it's not intended to actually be
# included in a commandset on its own!
def parse(self):
"""
We need to expand the default parsing to get all
the cases, see the module doc.
"""
# get all the normal parsing done (switches etc)
super(ObjManipCommand, self).parse()
obj_defs = ([], []) # stores left- and right-hand side of '='
obj_attrs = ([], []) # "
for iside, arglist in enumerate((self.lhslist, self.rhslist)):
# lhslist/rhslist is already split by ',' at this point
for objdef in arglist:
aliases, option, attrs = [], None, []
if ':' in objdef:
objdef, option = [part.strip() for part in objdef.rsplit(':', 1)]
if ';' in objdef:
objdef, aliases = [part.strip() for part in objdef.split(';', 1)]
aliases = [alias.strip() for alias in aliases.split(';') if alias.strip()]
if '/' in objdef:
objdef, attrs = [part.strip() for part in objdef.split('/', 1)]
attrs = [part.strip().lower() for part in attrs.split('/') if part.strip()]
# store data
obj_defs[iside].append({"name": objdef, 'option': option, 'aliases': aliases})
obj_attrs[iside].append({"name": objdef, 'attrs': attrs})
# store for future access
self.lhs_objs = obj_defs[0]
self.rhs_objs = obj_defs[1]
self.lhs_objattr = obj_attrs[0]
self.rhs_objattr = obj_attrs[1]
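    # Parsing sketch (input and names are purely illustrative): for a left-hand
    # side of
    #   box;crate:containers.Box, lamp/brightness/color
    # this produces
    #   lhs_objs    == [{'name': 'box', 'option': 'containers.Box', 'aliases': ['crate']},
    #                   {'name': 'lamp', 'option': None, 'aliases': []}]
    #   lhs_objattr == [{'name': 'box', 'attrs': []},
    #                   {'name': 'lamp', 'attrs': ['brightness', 'color']}]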
class CmdSetObjAlias(COMMAND_DEFAULT_CLASS):
"""
adding permanent aliases for object
Usage:
@alias <obj> [= [alias[,alias,alias,...]]]
@alias <obj> =
@alias/category <obj> = [alias[,alias,...]:<category>
Switches:
category - requires ending input with :category, to store the
given aliases with the given category.
Assigns aliases to an object so it can be referenced by more
than one name. Assign empty to remove all aliases from object. If
assigning a category, all aliases given will be using this category.
Observe that this is not the same thing as personal aliases
created with the 'nick' command! Aliases set with @alias are
changing the object in question, making those aliases usable
by everyone.
"""
key = "@alias"
aliases = "@setobjalias"
switch_options = ("category",)
locks = "cmd:perm(setobjalias) or perm(Builder)"
help_category = "Building"
def func(self):
"""Set the aliases."""
caller = self.caller
if not self.lhs:
string = "Usage: @alias <obj> [= [alias[,alias ...]]]"
self.caller.msg(string)
return
objname = self.lhs
# Find the object to receive aliases
obj = caller.search(objname)
if not obj:
return
if self.rhs is None:
# no =, so we just list aliases on object.
aliases = obj.aliases.all(return_key_and_category=True)
if aliases:
caller.msg("Aliases for %s: %s" % (
obj.get_display_name(caller),
", ".join("'%s'%s" % (alias, "" if category is None else "[category:'%s']" % category)
for (alias, category) in aliases)))
else:
caller.msg("No aliases exist for '%s'." % obj.get_display_name(caller))
return
if not (obj.access(caller, "control") or obj.access(caller, 'edit')):
caller.msg("You don't have permission to do that.")
return
if not self.rhs:
# we have given an empty =, so delete aliases
old_aliases = obj.aliases.all()
if old_aliases:
caller.msg("Cleared aliases from %s: %s" % (obj.get_display_name(caller), ", ".join(old_aliases)))
obj.aliases.clear()
else:
caller.msg("No aliases to clear.")
return
category = None
if "category" in self.switches:
if ":" in self.rhs:
rhs, category = self.rhs.rsplit(':', 1)
category = category.strip()
else:
caller.msg("If specifying the /category switch, the category must be given "
"as :category at the end.")
else:
rhs = self.rhs
# merge the old and new aliases (if any)
old_aliases = obj.aliases.get(category=category, return_list=True)
new_aliases = [alias.strip().lower() for alias in rhs.split(',') if alias.strip()]
# make the aliases only appear once
old_aliases.extend(new_aliases)
aliases = list(set(old_aliases))
# save back to object.
obj.aliases.add(aliases, category=category)
# we need to trigger this here, since this will force
# (default) Exits to rebuild their Exit commands with the new
# aliases
obj.at_cmdset_get(force_init=True)
# report all aliases on the object
caller.msg("Alias(es) for '%s' set to '%s'%s." % (obj.get_display_name(caller),
str(obj.aliases), " (category: '%s')" % category if category else ""))
class CmdCopy(ObjManipCommand):
"""
copy an object and its properties
Usage:
@copy[/reset] <original obj> [= <new_name>][;alias;alias..]
[:<new_location>] [,<new_name2> ...]
switch:
reset - make a 'clean' copy off the object, thus
removing any changes that might have been made to the original
since it was first created.
Create one or more copies of an object. If you don't supply any targets,
one exact copy of the original object will be created with the name *_copy.
"""
key = "@copy"
switch_options = ("reset",)
locks = "cmd:perm(copy) or perm(Builder)"
help_category = "Building"
def func(self):
"""Uses ObjManipCommand.parse()"""
caller = self.caller
args = self.args
if not args:
caller.msg("Usage: @copy <obj> [=<new_name>[;alias;alias..]]"
"[:<new_location>] [, <new_name2>...]")
return
if not self.rhs:
# this has no target =, so an identical new object is created.
from_obj_name = self.args
from_obj = caller.search(from_obj_name)
if not from_obj:
return
to_obj_name = "%s_copy" % from_obj_name
to_obj_aliases = ["%s_copy" % alias for alias in from_obj.aliases.all()]
copiedobj = ObjectDB.objects.copy_object(from_obj, new_key=to_obj_name,
new_aliases=to_obj_aliases)
if copiedobj:
string = "Identical copy of %s, named '%s' was created." % (from_obj_name, to_obj_name)
else:
string = "There was an error copying %s."
else:
# we have specified =. This might mean many object targets
from_obj_name = self.lhs_objs[0]['name']
from_obj = caller.search(from_obj_name)
if not from_obj:
return
for objdef in self.rhs_objs:
# loop through all possible copy-to targets
to_obj_name = objdef['name']
to_obj_aliases = objdef['aliases']
to_obj_location = objdef['option']
if to_obj_location:
to_obj_location = caller.search(to_obj_location,
global_search=True)
if not to_obj_location:
return
copiedobj = ObjectDB.objects.copy_object(from_obj,
new_key=to_obj_name,
new_location=to_obj_location,
new_aliases=to_obj_aliases)
if copiedobj:
string = "Copied %s to '%s' (aliases: %s)." % (from_obj_name, to_obj_name,
to_obj_aliases)
else:
string = "There was an error copying %s to '%s'." % (from_obj_name,
to_obj_name)
# we are done, echo to user
caller.msg(string)
class CmdCpAttr(ObjManipCommand):
"""
copy attributes between objects
Usage:
@cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
@cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
move - delete the attribute from the source object after copying.
Example:
@cpattr coolness = Anna/chillout, Anna/nicety, Tom/nicety
->
copies the coolness attribute (defined on yourself), to attributes
on Anna and Tom.
Copy the attribute one object to one or more attributes on another object.
If you don't supply a source object, yourself is used.
"""
key = "@cpattr"
switch_options = ("move",)
locks = "cmd:perm(cpattr) or perm(Builder)"
help_category = "Building"
def check_from_attr(self, obj, attr, clear=False):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can copy the attr from the object in question. If not, return a
false value and the command will abort. An error message should be
provided by this function.
If clear is True, user is attempting to move the attribute.
"""
return True
def check_to_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can write to the specified attribute on the specified object.
If not, return a false value and the attribute will be skipped. An
error message should be provided by this function.
"""
return True
def check_has_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and verify an object has an attribute.
"""
if not obj.attributes.has(attr):
self.caller.msg(
"%s doesn't have an attribute %s." % (obj.name, attr))
return False
return True
def get_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and get the attribute from the object.
"""
return obj.attributes.get(attr)
def func(self):
"""
Do the copying.
"""
caller = self.caller
if not self.rhs:
string = """Usage:
@cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
@cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
caller.msg(string)
return
lhs_objattr = self.lhs_objattr
to_objs = self.rhs_objattr
from_obj_name = lhs_objattr[0]['name']
from_obj_attrs = lhs_objattr[0]['attrs']
if not from_obj_attrs:
# this means the from_obj_name is actually an attribute
# name on self.
from_obj_attrs = [from_obj_name]
from_obj = self.caller
else:
from_obj = caller.search(from_obj_name)
if not from_obj or not to_objs:
caller.msg("You have to supply both source object and target(s).")
return
# copy to all to_obj:ects
if "move" in self.switches:
clear = True
else:
clear = False
if not self.check_from_attr(from_obj, from_obj_attrs[0], clear=clear):
return
for attr in from_obj_attrs:
if not self.check_has_attr(from_obj, attr):
return
if (len(from_obj_attrs) != len(set(from_obj_attrs))) and clear:
self.caller.msg("|RCannot have duplicate source names when moving!")
return
result = []
for to_obj in to_objs:
to_obj_name = to_obj['name']
to_obj_attrs = to_obj['attrs']
to_obj = caller.search(to_obj_name)
if not to_obj:
result.append("\nCould not find object '%s'" % to_obj_name)
continue
for inum, from_attr in enumerate(from_obj_attrs):
try:
to_attr = to_obj_attrs[inum]
except IndexError:
# if there are too few attributes given
# on the to_obj, we copy the original name instead.
to_attr = from_attr
if not self.check_to_attr(to_obj, to_attr):
continue
value = self.get_attr(from_obj, from_attr)
to_obj.attributes.add(to_attr, value)
if (clear and not (from_obj == to_obj and
from_attr == to_attr)):
from_obj.attributes.remove(from_attr)
result.append("\nMoved %s.%s -> %s.%s. (value: %s)" % (from_obj.name,
from_attr,
to_obj_name,
to_attr,
repr(value)))
else:
result.append("\nCopied %s.%s -> %s.%s. (value: %s)" % (from_obj.name,
from_attr,
to_obj_name,
to_attr,
repr(value)))
caller.msg("".join(result))
class CmdMvAttr(ObjManipCommand):
"""
move attributes between objects
Usage:
@mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
@mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
copy - Don't delete the original after moving.
Move an attribute from one object to one or more attributes on another
object. If you don't supply a source object, yourself is used.
"""
key = "@mvattr"
switch_options = ("copy",)
locks = "cmd:perm(mvattr) or perm(Builder)"
help_category = "Building"
def func(self):
"""
Do the moving
"""
if not self.rhs:
string = """Usage:
@mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
@mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
@mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
self.caller.msg(string)
return
# simply use @cpattr for all the functionality
if "copy" in self.switches:
self.execute_cmd("@cpattr %s" % self.args)
else:
self.execute_cmd("@cpattr/move %s" % self.args)
class CmdCreate(ObjManipCommand):
"""
create new objects
Usage:
@create[/drop] <objname>[;alias;alias...][:typeclass], <objname>...
switch:
drop - automatically drop the new object into your current
location (this is not echoed). This also sets the new
object's home to the current location rather than to you.
Creates one or more new objects. If typeclass is given, the object
is created as a child of this typeclass. The typeclass script is
assumed to be located under types/ and any further
directory structure is given in Python notation. So if you have a
correct typeclass 'RedButton' defined in
types/examples/red_button.py, you could create a new
object of this type like this:
@create/drop button;red : examples.red_button.RedButton
"""
key = "@create"
switch_options = ("drop",)
locks = "cmd:perm(create) or perm(Builder)"
help_category = "Building"
# lockstring of newly created objects, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_obj_lockstring = "control:id({id}) or perm(Admin);delete:id({id}) or perm(Admin)"
def func(self):
"""
Creates the object.
"""
caller = self.caller
if not self.args:
string = "Usage: @create[/drop] <newname>[;alias;alias...] [:typeclass.path]"
caller.msg(string)
return
# create the objects
for objdef in self.lhs_objs:
string = ""
name = objdef['name']
aliases = objdef['aliases']
typeclass = objdef['option']
# create object (if not a valid typeclass, the default
# object typeclass will automatically be used)
lockstring = self.new_obj_lockstring.format(id=caller.id)
obj = create.create_object(typeclass, name, caller,
home=caller, aliases=aliases,
locks=lockstring, report_to=caller)
if not obj:
continue
if aliases:
string = "You create a new %s: %s (aliases: %s)."
string = string % (obj.typename, obj.name, ", ".join(aliases))
else:
string = "You create a new %s: %s."
string = string % (obj.typename, obj.name)
# set a default desc
if not obj.db.desc:
obj.db.desc = "You see nothing special."
if 'drop' in self.switches:
if caller.location:
obj.home = caller.location
obj.move_to(caller.location, quiet=True)
if string:
caller.msg(string)
def _desc_load(caller):
return caller.db.evmenu_target.db.desc or ""
def _desc_save(caller, buf):
"""
Save line buffer to the desc prop. This should
return True if successful and also report its status to the user.
"""
caller.db.evmenu_target.db.desc = buf
caller.msg("Saved.")
return True
def _desc_quit(caller):
caller.attributes.remove("evmenu_target")
caller.msg("Exited editor.")
class CmdDesc(COMMAND_DEFAULT_CLASS):
"""
describe an object or the current room.
Usage:
@desc [<obj> =] <description>
Switches:
edit - Open up a line editor for more advanced editing.
Sets the "desc" attribute on an object. If an object is not given,
describe the current room.
"""
key = "@desc"
aliases = "@describe"
switch_options = ("edit",)
locks = "cmd:perm(desc) or perm(Builder)"
help_category = "Building"
def edit_handler(self):
if self.rhs:
self.msg("|rYou may specify a value, or use the edit switch, "
"but not both.|n")
return
if self.args:
obj = self.caller.search(self.args)
else:
obj = self.caller.location or self.msg("|rYou can't describe oblivion.|n")
if not obj:
return
if not (obj.access(self.caller, 'control') or obj.access(self.caller, 'edit')):
self.caller.msg("You don't have permission to edit the description of %s." % obj.key)
self.caller.db.evmenu_target = obj
# launch the editor
EvEditor(self.caller, loadfunc=_desc_load, savefunc=_desc_save,
quitfunc=_desc_quit, key="desc", persistent=True)
return
def func(self):
"""Define command"""
caller = self.caller
if not self.args and 'edit' not in self.switches:
caller.msg("Usage: @desc [<obj> =] <description>")
return
if 'edit' in self.switches:
self.edit_handler()
return
if '=' in self.args:
# We have an =
obj = caller.search(self.lhs)
if not obj:
return
desc = self.rhs or ''
else:
obj = caller.location or self.msg("|rYou can't describe oblivion.|n")
if not obj:
return
desc = self.args
if (obj.access(self.caller, 'control') or obj.access(self.caller, 'edit')):
obj.db.desc = desc
caller.msg("The description was set on %s." % obj.get_display_name(caller))
else:
caller.msg("You don't have permission to edit the description of %s." % obj.key)
class CmdDestroy(COMMAND_DEFAULT_CLASS):
"""
permanently delete objects
Usage:
@destroy[/switches] [obj, obj2, obj3, [dbref-dbref], ...]
Switches:
override - The @destroy command will usually avoid accidentally
destroying account objects. This switch overrides this safety.
force - destroy without confirmation.
Examples:
@destroy house, roof, door, 44-78
@destroy 5-10, flower, 45
@destroy/force north
Destroys one or many objects. If dbrefs are used, a range to delete can be
given, e.g. 4-10. Also the end points will be deleted. This command
displays a confirmation before destroying, to make sure of your choice.
You can specify the /force switch to bypass this confirmation.
"""
key = "@destroy"
aliases = ["@delete", "@del"]
switch_options = ("override", "force")
locks = "cmd:perm(destroy) or perm(Builder)"
help_category = "Building"
confirm = True # set to False to always bypass confirmation
default_confirm = 'yes' # what to assume if just pressing enter (yes/no)
def func(self):
"""Implements the command."""
caller = self.caller
delete = True
if not self.args or not self.lhslist:
caller.msg("Usage: @destroy[/switches] [obj, obj2, obj3, [dbref-dbref],...]")
delete = False
def delobj(obj):
# helper function for deleting a single object
string = ""
if not obj.pk:
string = "\nObject %s was already deleted." % obj.db_key
else:
objname = obj.name
if not (obj.access(caller, "control") or obj.access(caller, 'delete')):
return "\nYou don't have permission to delete %s." % objname
if obj.account and 'override' not in self.switches:
return "\nObject %s is controlled by an active account. Use /override to delete anyway." % objname
if obj.dbid == int(settings.DEFAULT_HOME.lstrip("#")):
return "\nYou are trying to delete |c%s|n, which is set as DEFAULT_HOME. " \
"Re-point settings.DEFAULT_HOME to another " \
"object before continuing." % objname
had_exits = hasattr(obj, "exits") and obj.exits
had_objs = hasattr(obj, "contents") and any(obj for obj in obj.contents
if not (hasattr(obj, "exits") and obj not in obj.exits))
# do the deletion
okay = obj.delete()
if not okay:
string += "\nERROR: %s not deleted, probably because delete() returned False." % objname
else:
string += "\n%s was destroyed." % objname
if had_exits:
string += " Exits to and from %s were destroyed as well." % objname
if had_objs:
string += " Objects inside %s were moved to their homes." % objname
return string
objs = []
for objname in self.lhslist:
if not delete:
continue
if '-' in objname:
# might be a range of dbrefs
dmin, dmax = [utils.dbref(part, reqhash=False)
for part in objname.split('-', 1)]
if dmin and dmax:
for dbref in range(int(dmin), int(dmax + 1)):
obj = caller.search("#" + str(dbref))
if obj:
objs.append(obj)
continue
else:
obj = caller.search(objname)
else:
obj = caller.search(objname)
if obj is None:
self.caller.msg(" (Objects to destroy must either be local or specified with a unique #dbref.)")
elif obj not in objs:
objs.append(obj)
if objs and ("force" not in self.switches and type(self).confirm):
confirm = "Are you sure you want to destroy "
if len(objs) == 1:
confirm += objs[0].get_display_name(caller)
elif len(objs) < 5:
confirm += ", ".join([obj.get_display_name(caller) for obj in objs])
else:
confirm += ", ".join(["#{}".format(obj.id) for obj in objs])
confirm += " [yes]/no?" if self.default_confirm == 'yes' else " yes/[no]"
answer = ""
while answer.strip().lower() not in ("y", "yes", "n", "no"):
answer = yield(confirm)
answer = self.default_confirm if answer == '' else answer
if answer.strip().lower() in ("n", "no"):
caller.msg("Cancelled: no object was destroyed.")
delete = False
if delete:
results = []
for obj in objs:
results.append(delobj(obj))
if results:
caller.msg("".join(results).strip())
class CmdDig(ObjManipCommand):
"""
build new rooms and connect them to the current location
Usage:
@dig[/switches] <roomname>[;alias;alias...][:typeclass]
[= <exit_to_there>[;alias][:typeclass]]
[, <exit_to_here>[;alias][:typeclass]]
Switches:
tel or teleport - move yourself to the new room
Examples:
@dig kitchen = north;n, south;s
@dig house:myrooms.MyHouseTypeclass
@dig sheer cliff;cliff;sheer = climb up, climb down
This command is a convenient way to build rooms quickly; it creates the
new room and you can optionally set up exits back and forth between your
current room and the new one. You can add as many aliases as you
like to the name of the room and the exits in question; an example
would be 'north;no;n'.
"""
key = "@dig"
switch_options = ("teleport",)
locks = "cmd:perm(dig) or perm(Builder)"
help_category = "Building"
# lockstring of newly created rooms, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_room_lockstring = "control:id({id}) or perm(Admin); " \
"delete:id({id}) or perm(Admin); " \
"edit:id({id}) or perm(Admin)"
def func(self):
"""Do the digging. Inherits variables from ObjManipCommand.parse()"""
caller = self.caller
if not self.lhs:
string = "Usage: @dig[/teleport] <roomname>[;alias;alias...]" \
"[:parent] [= <exit_there>"
string += "[;alias;alias..][:parent]] "
string += "[, <exit_back_here>[;alias;alias..][:parent]]"
caller.msg(string)
return
room = self.lhs_objs[0]
if not room["name"]:
caller.msg("You must supply a new room name.")
return
location = caller.location
# Create the new room
typeclass = room['option']
if not typeclass:
typeclass = settings.BASE_ROOM_TYPECLASS
# create room
new_room = create.create_object(typeclass, room["name"],
aliases=room["aliases"],
report_to=caller)
lockstring = self.new_room_lockstring.format(id=caller.id)
new_room.locks.add(lockstring)
alias_string = ""
if new_room.aliases.all():
alias_string = " (%s)" % ", ".join(new_room.aliases.all())
room_string = "Created room %s(%s)%s of type %s." % (
new_room, new_room.dbref, alias_string, typeclass)
# create exit to room
exit_to_string = ""
exit_back_string = ""
if self.rhs_objs:
to_exit = self.rhs_objs[0]
if not to_exit["name"]:
exit_to_string = "\nNo exit created to new room."
elif not location:
exit_to_string = "\nYou cannot create an exit from a None-location."
else:
# Build the exit to the new room from the current one
typeclass = to_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_to_exit = create.create_object(typeclass, to_exit["name"],
location,
aliases=to_exit["aliases"],
locks=lockstring,
destination=new_room,
report_to=caller)
alias_string = ""
if new_to_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_to_exit.aliases.all())
exit_to_string = "\nCreated Exit from %s to %s: %s(%s)%s."
exit_to_string = exit_to_string % (location.name,
new_room.name,
new_to_exit,
new_to_exit.dbref,
alias_string)
# Create exit back from new room
if len(self.rhs_objs) > 1:
# Building the exit back to the current room
back_exit = self.rhs_objs[1]
if not back_exit["name"]:
exit_back_string = "\nNo back exit created."
elif not location:
exit_back_string = "\nYou cannot create an exit back to a None-location."
else:
typeclass = back_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_back_exit = create.create_object(typeclass,
back_exit["name"],
new_room,
aliases=back_exit["aliases"],
locks=lockstring,
destination=location,
report_to=caller)
alias_string = ""
if new_back_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_back_exit.aliases.all())
exit_back_string = "\nCreated Exit back from %s to %s: %s(%s)%s."
exit_back_string = exit_back_string % (new_room.name,
location.name,
new_back_exit,
new_back_exit.dbref,
alias_string)
caller.msg("%s%s%s" % (room_string, exit_to_string, exit_back_string))
if new_room and 'teleport' in self.switches:
caller.move_to(new_room)
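# Illustrative sketch of what a successful @dig call reports, assembled from
# the strings built in func() above (dbrefs and typeclass path are hypothetical):
#
#   > @dig/teleport kitchen;cookhouse = north;n, south;s
#   Created room kitchen(#12) (cookhouse) of type typeclasses.rooms.Room.
#   Created Exit from Limbo to kitchen: north(#13) (n).
#   Created Exit back from kitchen to Limbo: south(#14) (s).
#
# With /teleport the caller is then moved into the newly dug room.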
class CmdTunnel(COMMAND_DEFAULT_CLASS):
"""
create new rooms in cardinal directions only
Usage:
@tunnel[/switch] <direction>[:typeclass] [= <roomname>[;alias;alias;...][:typeclass]]
Switches:
oneway - do not create an exit back to the current location
tel - teleport to the newly created room
Example:
@tunnel n
@tunnel n = house;mike's place;green building
This is a simple way to build using pre-defined directions:
|wn,ne,e,se,s,sw,w,nw|n (north, northeast etc)
|wu,d|n (up and down)
|wi,o|n (in and out)
    The full names (north, in, southwest, etc) will always be used as
    the main name for the exit, with the abbreviation as an alias (so
    the exit can be used with both "north" and "n", for example).
    Opposite directions will automatically be
created back from the new room unless the /oneway switch is given.
For more flexibility and power in creating rooms, use @dig.
"""
key = "@tunnel"
aliases = ["@tun"]
switch_options = ("oneway", "tel")
locks = "cmd: perm(tunnel) or perm(Builder)"
help_category = "Building"
# store the direction, full name and its opposite
directions = {"n": ("north", "s"),
"ne": ("northeast", "sw"),
"e": ("east", "w"),
"se": ("southeast", "nw"),
"s": ("south", "n"),
"sw": ("southwest", "ne"),
"w": ("west", "e"),
"nw": ("northwest", "se"),
"u": ("up", "d"),
"d": ("down", "u"),
"i": ("in", "o"),
"o": ("out", "i")}
def func(self):
"""Implements the tunnel command"""
if not self.args or not self.lhs:
string = "Usage: @tunnel[/switch] <direction>[:typeclass] [= <roomname>" \
"[;alias;alias;...][:typeclass]]"
self.caller.msg(string)
return
# If we get a typeclass, we need to get just the exitname
exitshort = self.lhs.split(":")[0]
if exitshort not in self.directions:
string = "@tunnel can only understand the following directions: %s." % ",".join(
sorted(self.directions.keys()))
string += "\n(use @dig for more freedom)"
self.caller.msg(string)
return
# retrieve all input and parse it
exitname, backshort = self.directions[exitshort]
backname = self.directions[backshort][0]
        # if we received a typeclass for the exit, add it to the alias (short name)
if ":" in self.lhs:
# limit to only the first : character
exit_typeclass = ":" + self.lhs.split(":", 1)[-1]
# exitshort and backshort are the last part of the exit strings,
# so we add our typeclass argument after
exitshort += exit_typeclass
backshort += exit_typeclass
roomname = "Some place"
if self.rhs:
roomname = self.rhs # this may include aliases; that's fine.
telswitch = ""
if "tel" in self.switches:
telswitch = "/teleport"
backstring = ""
if "oneway" not in self.switches:
backstring = ", %s;%s" % (backname, backshort)
# build the string we will use to call @dig
digstring = "@dig%s %s = %s;%s%s" % (telswitch, roomname,
exitname, exitshort, backstring)
self.execute_cmd(digstring)
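# Illustrative sketch: @tunnel simply rewrites its input into a @dig call. For
# example (hypothetical room name), the input
#
#   > @tunnel/tel n = kitchen
#
# builds and executes the digstring
#
#   @dig/teleport kitchen = north;n, south;s
#
# i.e. the full direction name plus its abbreviation as exit aliases, with the
# opposite direction added as the return exit unless /oneway is given.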
class CmdLink(COMMAND_DEFAULT_CLASS):
"""
link existing rooms together with exits
Usage:
@link[/switches] <object> = <target>
@link[/switches] <object> =
@link[/switches] <object>
Switch:
twoway - connect two exits. For this to work, BOTH <object>
and <target> must be exit objects.
If <object> is an exit, set its destination to <target>. Two-way operation
instead sets the destination to the *locations* of the respective given
arguments.
The second form (a lone =) sets the destination to None (same as
the @unlink command) and the third form (without =) just shows the
currently set destination.
"""
key = "@link"
locks = "cmd:perm(link) or perm(Builder)"
help_category = "Building"
def func(self):
"""Perform the link"""
caller = self.caller
if not self.args:
caller.msg("Usage: @link[/twoway] <object> = <target>")
return
object_name = self.lhs
# get object
obj = caller.search(object_name, global_search=True)
if not obj:
return
if self.rhs:
# this means a target name was given
target = caller.search(self.rhs, global_search=True)
if not target:
return
string = ""
note = "Note: %s(%s) did not have a destination set before. Make sure you linked the right thing."
if not obj.destination:
string = note % (obj.name, obj.dbref)
if "twoway" in self.switches:
if not (target.location and obj.location):
string = "To create a two-way link, %s and %s must both have a location" % (obj, target)
string += " (i.e. they cannot be rooms, but should be exits)."
self.caller.msg(string)
return
if not target.destination:
string += note % (target.name, target.dbref)
obj.destination = target.location
target.destination = obj.location
string += "\nLink created %s (in %s) <-> %s (in %s) (two-way)." % (
obj.name, obj.location, target.name, target.location)
else:
obj.destination = target
string += "\nLink created %s -> %s (one way)." % (obj.name, target)
elif self.rhs is None:
# this means that no = was given (otherwise rhs
# would have been an empty string). So we inspect
# the home/destination on object
dest = obj.destination
if dest:
string = "%s is an exit to %s." % (obj.name, dest.name)
else:
string = "%s is not an exit. Its home location is %s." % (obj.name, obj.home)
else:
# We gave the command @link 'obj = ' which means we want to
# clear destination.
if obj.destination:
obj.destination = None
string = "Former exit %s no longer links anywhere." % obj.name
else:
string = "%s had no destination to unlink." % obj.name
# give feedback
caller.msg(string.strip())
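# Illustrative summary of the linking modes handled above (object names are
# hypothetical):
#
#   > @link north = kitchen        # one-way: north.destination = kitchen
#   > @link/twoway north = south   # both must be exits with locations;
#                                  #   north.destination = south.location,
#                                  #   south.destination = north.location
#   > @link north =                # clear north.destination (same as @unlink)
#   > @link north                  # just report the current destination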
class CmdUnLink(CmdLink):
"""
remove exit-connections between rooms
Usage:
@unlink <Object>
Unlinks an object, for example an exit, disconnecting
it from whatever it was connected to.
"""
# this is just a child of CmdLink
key = "@unlink"
locks = "cmd:perm(unlink) or perm(Builder)"
    help_category = "Building"
def func(self):
"""
All we need to do here is to set the right command
and call func in CmdLink
"""
caller = self.caller
if not self.args:
caller.msg("Usage: @unlink <object>")
return
# This mimics '@link <obj> = ' which is the same as @unlink
self.rhs = ""
# call the @link functionality
super(CmdUnLink, self).func()
class CmdSetHome(CmdLink):
"""
set an object's home location
Usage:
@sethome <obj> [= <home_location>]
The "home" location is a "safety" location for objects; they
will be moved there if their current location ceases to exist. All
objects should always have a home location for this reason.
It is also a convenient target of the "home" command.
If no location is given, just view the object's home location.
"""
key = "@sethome"
locks = "cmd:perm(@sethome) or perm(Builder)"
help_category = "Building"
def func(self):
"""implement the command"""
if not self.args:
string = "Usage: @sethome <obj> [= <home_location>]"
self.caller.msg(string)
return
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if not self.rhs:
# just view
home = obj.home
if not home:
string = "This object has no home location set!"
else:
string = "%s's current home is %s(%s)." % (obj, home,
home.dbref)
else:
# set a home location
new_home = self.caller.search(self.rhs, global_search=True)
if not new_home:
return
old_home = obj.home
obj.home = new_home
if old_home:
string = "%s's home location was changed from %s(%s) to %s(%s)." % (
obj, old_home, old_home.dbref, new_home, new_home.dbref)
else:
string = "%s' home location was set to %s(%s)." % (obj, new_home, new_home.dbref)
self.caller.msg(string)
class CmdListCmdSets(COMMAND_DEFAULT_CLASS):
"""
list command sets defined on an object
Usage:
@cmdsets <obj>
This displays all cmdsets assigned
to a user. Defaults to yourself.
"""
key = "@cmdsets"
aliases = "@listcmsets"
locks = "cmd:perm(listcmdsets) or perm(Builder)"
help_category = "Building"
def func(self):
"""list the cmdsets"""
caller = self.caller
if self.arglist:
obj = caller.search(self.arglist[0])
if not obj:
return
else:
obj = caller
string = "%s" % obj.cmdset
caller.msg(string)
class CmdName(ObjManipCommand):
"""
change the name and/or aliases of an object
Usage:
@name <obj> = <newname>;alias1;alias2
Rename an object to something new. Use *obj to
rename an account.
"""
key = "@name"
aliases = ["@rename"]
locks = "cmd:perm(rename) or perm(Builder)"
help_category = "Building"
def func(self):
"""change the name"""
caller = self.caller
if not self.args:
caller.msg("Usage: @name <obj> = <newname>[;alias;alias;...]")
return
obj = None
if self.lhs_objs:
objname = self.lhs_objs[0]['name']
if objname.startswith("*"):
# account mode
obj = caller.account.search(objname.lstrip("*"))
if obj:
if self.rhs_objs[0]['aliases']:
caller.msg("Accounts can't have aliases.")
return
newname = self.rhs
if not newname:
caller.msg("No name defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have right to edit this account %s." % obj)
return
obj.username = newname
obj.save()
caller.msg("Account's name changed to '%s'." % newname)
return
# object search, also with *
obj = caller.search(objname)
if not obj:
return
if self.rhs_objs:
newname = self.rhs_objs[0]['name']
aliases = self.rhs_objs[0]['aliases']
else:
newname = self.rhs
aliases = None
if not newname and not aliases:
caller.msg("No names or aliases defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have the right to edit %s." % obj)
return
# change the name and set aliases:
if newname:
obj.name = newname
astring = ""
if aliases:
[obj.aliases.add(alias) for alias in aliases]
astring = " (%s)" % (", ".join(aliases))
# fix for exits - we need their exit-command to change name too
if obj.destination:
obj.flush_from_cache(force=True)
caller.msg("Object's name changed to '%s'%s." % (newname, astring))
class CmdOpen(ObjManipCommand):
"""
open a new exit from the current room
Usage:
@open <new exit>[;alias;alias..][:typeclass] [,<return exit>[;alias;..][:typeclass]]] = <destination>
Handles the creation of exits. If a destination is given, the exit
will point there. The <return exit> argument sets up an exit at the
destination leading back to the current room. Destination name
can be given both as a #dbref and a name, if that name is globally
unique.
"""
key = "@open"
locks = "cmd:perm(open) or perm(Builder)"
help_category = "Building"
# a custom member method to chug out exits and do checks
def create_exit(self, exit_name, location, destination,
exit_aliases=None, typeclass=None):
"""
Helper function to avoid code duplication.
At this point we know destination is a valid location
"""
caller = self.caller
string = ""
# check if this exit object already exists at the location.
        # we need to ignore errors (so no automatic feedback) since we
# have to know the result of the search to decide what to do.
exit_obj = caller.search(exit_name, location=location, quiet=True, exact=True)
if len(exit_obj) > 1:
# give error message and return
caller.search(exit_name, location=location, exact=True)
return None
if exit_obj:
exit_obj = exit_obj[0]
if not exit_obj.destination:
# we are trying to link a non-exit
string = "'%s' already exists and is not an exit!\nIf you want to convert it "
string += "to an exit, you must assign an object to the 'destination' property first."
caller.msg(string % exit_name)
return None
# we are re-linking an old exit.
old_destination = exit_obj.destination
if old_destination:
string = "Exit %s already exists." % exit_name
if old_destination.id != destination.id:
# reroute the old exit.
exit_obj.destination = destination
if exit_aliases:
[exit_obj.aliases.add(alias) for alias in exit_aliases]
string += " Rerouted its old destination '%s' to '%s' and changed aliases." % (
old_destination.name, destination.name)
else:
string += " It already points to the correct place."
else:
# exit does not exist before. Create a new one.
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
exit_obj = create.create_object(typeclass,
key=exit_name,
location=location,
aliases=exit_aliases,
report_to=caller)
if exit_obj:
# storing a destination is what makes it an exit!
exit_obj.destination = destination
string = "" if not exit_aliases else " (aliases: %s)" % (
", ".join([str(e) for e in exit_aliases]))
string = "Created new Exit '%s' from %s to %s%s." % (
exit_name, location.name, destination.name, string)
else:
string = "Error: Exit '%s' not created." % exit_name
# emit results
caller.msg(string)
return exit_obj
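    # Rough summary of create_exit()'s behaviour above (the exit name is
    # hypothetical):
    #  - no object named "north" at location -> a new exit is created and its
    #    destination is set (storing a destination is what makes it an exit)
    #  - an exit "north" already exists      -> it is re-routed to the new
    #    destination, or reported as already pointing to the right place
    #  - a non-exit "north" already exists   -> nothing is changed; the caller
    #    is told to assign a destination manually first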
def func(self):
"""
This is where the processing starts.
Uses the ObjManipCommand.parser() for pre-processing
as well as the self.create_exit() method.
"""
caller = self.caller
if not self.args or not self.rhs:
string = "Usage: @open <new exit>[;alias...][:typeclass][,<return exit>[;alias..][:typeclass]]] "
string += "= <destination>"
caller.msg(string)
return
# We must have a location to open an exit
location = caller.location
if not location:
caller.msg("You cannot create an exit from a None-location.")
return
# obtain needed info from cmdline
exit_name = self.lhs_objs[0]['name']
exit_aliases = self.lhs_objs[0]['aliases']
exit_typeclass = self.lhs_objs[0]['option']
dest_name = self.rhs
# first, check so the destination exists.
destination = caller.search(dest_name, global_search=True)
if not destination:
return
# Create exit
ok = self.create_exit(exit_name,
location,
destination,
exit_aliases,
exit_typeclass)
if not ok:
# an error; the exit was not created, so we quit.
return
# Create back exit, if any
if len(self.lhs_objs) > 1:
back_exit_name = self.lhs_objs[1]['name']
back_exit_aliases = self.lhs_objs[1]['aliases']
back_exit_typeclass = self.lhs_objs[1]['option']
self.create_exit(back_exit_name,
destination,
location,
back_exit_aliases,
back_exit_typeclass)
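# Illustrative sketch of @open output, produced by create_exit() above (names
# and locations are hypothetical):
#
#   > @open north;n, south;s = kitchen
#   Created new Exit 'north' from Limbo to kitchen (aliases: n).
#   Created new Exit 'south' from kitchen to Limbo (aliases: s).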
def _convert_from_string(cmd, strobj):
"""
Converts a single object in *string form* to its equivalent python
type.
Python earlier than 2.6:
Handles floats, ints, and limited nested lists and dicts
(can't handle lists in a dict, for example, this is mainly due to
the complexity of parsing this rather than any technical difficulty -
if there is a need for @set-ing such complex structures on the
command line we might consider adding it).
Python 2.6 and later:
Supports all Python structures through literal_eval as long as they
are valid Python syntax. If they are not (such as [test, test2], ie
without the quotes around the strings), the entire structure will
be converted to a string and a warning will be given.
We need to convert like this since all data being sent over the
telnet connection by the Account is text - but we will want to
store it as the "real" python type so we can do convenient
comparisons later (e.g. obj.db.value = 2, if value is stored as a
string this will always fail).
"""
def rec_convert(obj):
"""
Helper function of recursive conversion calls. This is only
used for Python <=2.5. After that literal_eval is available.
"""
# simple types
try:
return int(obj)
except ValueError:
# obj cannot be converted to int - that's fine
pass
try:
return float(obj)
except ValueError:
# obj cannot be converted to float - that's fine
pass
# iterables
if obj.startswith('[') and obj.endswith(']'):
"A list. Traverse recursively."
return [rec_convert(val) for val in obj[1:-1].split(',')]
if obj.startswith('(') and obj.endswith(')'):
"A tuple. Traverse recursively."
return tuple([rec_convert(val) for val in obj[1:-1].split(',')])
if obj.startswith('{') and obj.endswith('}') and ':' in obj:
"A dict. Traverse recursively."
return dict([(rec_convert(pair.split(":", 1)[0]),
rec_convert(pair.split(":", 1)[1]))
for pair in obj[1:-1].split(',') if ":" in pair])
# if nothing matches, return as-is
return obj
# Use literal_eval to parse python structure exactly.
try:
return _LITERAL_EVAL(strobj)
except (SyntaxError, ValueError):
# treat as string
strobj = utils.to_str(strobj)
string = "|RNote: name \"|r%s|R\" was converted to a string. " \
"Make sure this is acceptable." % strobj
cmd.caller.msg(string)
return strobj
else:
# fall back to old recursive solution (does not support
# nested lists/dicts)
return rec_convert(strobj.strip())
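# A minimal sketch of what _convert_from_string() does with typical input
# (the values below are illustrative only):
#
#   "56"              -> int 56 (via literal_eval)
#   "[1, 2, 'three']" -> list [1, 2, 'three']
#   "{'a': 1}"        -> dict {'a': 1}
#   "[test, test2]"   -> kept as the string "[test, test2]" and the caller is
#                        warned, since it is not valid Python syntax
#
# The rec_convert() fallback is only used on very old Python versions where
# literal_eval is unavailable and handles a more limited set of structures.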
class CmdSetAttribute(ObjManipCommand):
"""
set attribute on an object or account
Usage:
@set <obj>/<attr> = <value>
@set <obj>/<attr> =
@set <obj>/<attr>
@set *<account>/attr = <value>
Switch:
edit: Open the line editor (string values only)
script: If we're trying to set an attribute on a script
channel: If we're trying to set an attribute on a channel
account: If we're trying to set an attribute on an account
room: Setting an attribute on a room (global search)
exit: Setting an attribute on an exit (global search)
char: Setting an attribute on a character (global search)
character: Alias for char, as above.
Sets attributes on objects. The second form clears
a previously set attribute while the last form
inspects the current value of the attribute
(if any).
The most common data to save with this command are strings and
numbers. You can however also set Python primitives such as lists,
dictionaries and tuples on objects (this might be important for
the functionality of certain custom objects). This is indicated
by you starting your value with one of |c'|n, |c"|n, |c(|n, |c[|n
or |c{ |n.
Note that you should leave a space after starting a dictionary ('{ ')
so as to not confuse the dictionary start with a colour code like \{g.
Remember that if you use Python primitives like this, you must
write proper Python syntax too - notably you must include quotes
around your strings or you will get an error.
"""
key = "@set"
locks = "cmd:perm(set) or perm(Builder)"
help_category = "Building"
def check_obj(self, obj):
"""
This may be overridden by subclasses in case restrictions need to be
placed on whether certain objects can have attributes set by certain
accounts.
This function is expected to display its own error message.
Returning False will abort the command.
"""
return True
def check_attr(self, obj, attr_name):
"""
This may be overridden by subclasses in case restrictions need to be
placed on what attributes can be set by who beyond the normal lock.
        This function is expected to display its own error message. It is
run once for every attribute that is checked, blocking only those
attributes which are not permitted and letting the others through.
"""
return attr_name
def view_attr(self, obj, attr):
"""
Look up the value of an attribute and return a string displaying it.
"""
if obj.attributes.has(attr):
return "\nAttribute %s/%s = %s" % (obj.name, attr,
obj.attributes.get(attr))
else:
return "\n%s has no attribute '%s'." % (obj.name, attr)
def rm_attr(self, obj, attr):
"""
Remove an attribute from the object, and report back.
"""
if obj.attributes.has(attr):
            val = obj.attributes.get(attr)
obj.attributes.remove(attr)
return "\nDeleted attribute '%s' (= %s) from %s." % (attr, val, obj.name)
else:
return "\n%s has no attribute '%s'." % (obj.name, attr)
def set_attr(self, obj, attr, value):
try:
verb = "Modified" if obj.attributes.has(attr) else "Created"
obj.attributes.add(attr, value)
return "\n%s attribute %s/%s = %s" % (verb, obj.name, attr, repr(value))
except SyntaxError:
# this means literal_eval tried to parse a faulty string
return ("\n|RCritical Python syntax error in your value. Only "
"primitive Python structures are allowed.\nYou also "
"need to use correct Python syntax. Remember especially "
"to put quotes around all strings inside lists and "
"dicts.|n")
def edit_handler(self, obj, attr):
"""Activate the line editor"""
def load(caller):
"""Called for the editor to load the buffer"""
old_value = obj.attributes.get(attr)
if old_value is not None and not isinstance(old_value, basestring):
typ = type(old_value).__name__
self.caller.msg("|RWARNING! Saving this buffer will overwrite the "
"current attribute (of type %s) with a string!|n" % typ)
return str(old_value)
return old_value
def save(caller, buf):
"""Called when editor saves its buffer."""
obj.attributes.add(attr, buf)
caller.msg("Saved Attribute %s." % attr)
# start the editor
EvEditor(self.caller, load, save, key="%s/%s" % (obj, attr))
def search_for_obj(self, objname):
"""
Searches for an object matching objname. The object may be of different typeclasses.
Args:
objname: Name of the object we're looking for
Returns:
A typeclassed object, or None if nothing is found.
"""
from evennia.utils.utils import variable_from_module
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
caller = self.caller
if objname.startswith('*') or "account" in self.switches:
found_obj = caller.search_account(objname.lstrip('*'))
elif "script" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_script(objname), caller)
elif "channel" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_channel(objname), caller)
else:
global_search = True
if "char" in self.switches or "character" in self.switches:
typeclass = settings.BASE_CHARACTER_TYPECLASS
elif "room" in self.switches:
typeclass = settings.BASE_ROOM_TYPECLASS
elif "exit" in self.switches:
typeclass = settings.BASE_EXIT_TYPECLASS
else:
global_search = False
typeclass = None
found_obj = caller.search(objname, global_search=global_search, typeclass=typeclass)
return found_obj
def func(self):
"""Implement the set attribute - a limited form of @py."""
caller = self.caller
if not self.args:
caller.msg("Usage: @set obj/attr = value. Use empty value to clear.")
return
# get values prepared by the parser
value = self.rhs
objname = self.lhs_objattr[0]['name']
attrs = self.lhs_objattr[0]['attrs']
obj = self.search_for_obj(objname)
if not obj:
return
if not self.check_obj(obj):
return
result = []
if "edit" in self.switches:
# edit in the line editor
if not (obj.access(self.caller, 'control') or obj.access(self.caller, 'edit')):
caller.msg("You don't have permission to edit %s." % obj.key)
return
if len(attrs) > 1:
caller.msg("The Line editor can only be applied "
"to one attribute at a time.")
return
self.edit_handler(obj, attrs[0])
return
if not value:
if self.rhs is None:
# no = means we inspect the attribute(s)
if not attrs:
attrs = [attr.key for attr in obj.attributes.all()]
for attr in attrs:
if not self.check_attr(obj, attr):
continue
result.append(self.view_attr(obj, attr))
# we view it without parsing markup.
self.caller.msg("".join(result).strip(), options={"raw": True})
return
else:
# deleting the attribute(s)
if not (obj.access(self.caller, 'control') or obj.access(self.caller, 'edit')):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr):
continue
result.append(self.rm_attr(obj, attr))
else:
# setting attribute(s). Make sure to convert to real Python type before saving.
if not (obj.access(self.caller, 'control') or obj.access(self.caller, 'edit')):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr):
continue
value = _convert_from_string(self, value)
result.append(self.set_attr(obj, attr, value))
# send feedback
caller.msg("".join(result).strip('\n'))
class CmdTypeclass(COMMAND_DEFAULT_CLASS):
"""
set or change an object's typeclass
Usage:
@typeclass[/switch] <object> [= typeclass.path]
      @type <object> [= typeclass.path]   (alias)
      @parent <object> [= typeclass.path] (alias)
@typeclass/list/show [typeclass.path]
@swap - this is a shorthand for using /force/reset flags.
      @update - this is a shorthand for using the /force/update flags.
Switch:
show, examine - display the current typeclass of object (default) or, if
given a typeclass path, show the docstring of that typeclass.
update - *only* re-run at_object_creation on this object
meaning locks or other properties set later may remain.
reset - clean out *all* the attributes and properties on the
object - basically making this a new clean object.
force - change to the typeclass also if the object
already has a typeclass of the same name.
list - show available typeclasses.
Example:
@type button = examples.red_button.RedButton
If the typeclass_path is not given, the current object's
typeclass is assumed.
View or set an object's typeclass. If setting, the creation hooks
of the new typeclass will be run on the object. If you have
clashing properties on the old class, use /reset. By default you
are protected from changing to a typeclass of the same name as the
one you already have - use /force to override this protection.
The given typeclass must be identified by its location using
    python dot-notation pointing to the correct module and class.
    Errors in the path or in the new typeclass will lead to the old
    typeclass being kept. The location of the typeclass module is
    searched from the default typeclass directory, as defined in the
    server settings.
"""
key = "@typeclass"
aliases = ["@type", "@parent", "@swap", "@update"]
switch_options = ("show", "examine", "update", "reset", "force", "list")
locks = "cmd:perm(typeclass) or perm(Builder)"
help_category = "Building"
def func(self):
"""Implements command"""
caller = self.caller
if 'list' in self.switches:
tclasses = get_all_typeclasses()
contribs = [key for key in sorted(tclasses)
if key.startswith("evennia.contrib")] or ["<None loaded>"]
core = [key for key in sorted(tclasses)
if key.startswith("evennia") and key not in contribs] or ["<None loaded>"]
game = [key for key in sorted(tclasses)
if not key.startswith("evennia")] or ["<None loaded>"]
string = ("|wCore typeclasses|n\n"
" {core}\n"
"|wLoaded Contrib typeclasses|n\n"
" {contrib}\n"
"|wGame-dir typeclasses|n\n"
" {game}").format(core="\n ".join(core),
contrib="\n ".join(contribs),
game="\n ".join(game))
EvMore(caller, string, exit_on_lastpage=True)
return
if not self.args:
caller.msg("Usage: %s <object> [= typeclass]" % self.cmdstring)
return
if "show" in self.switches or "examine" in self.switches:
oquery = self.lhs
obj = caller.search(oquery, quiet=True)
if not obj:
# no object found to examine, see if it's a typeclass-path instead
tclasses = get_all_typeclasses()
matches = [(key, tclass)
for key, tclass in tclasses.items() if key.endswith(oquery)]
nmatches = len(matches)
if nmatches > 1:
caller.msg("Multiple typeclasses found matching {}:\n {}".format(
oquery, "\n ".join(tup[0] for tup in matches)))
elif not matches:
caller.msg("No object or typeclass path found to match '{}'".format(oquery))
else:
# one match found
caller.msg("Docstring for typeclass '{}':\n{}".format(
oquery, matches[0][1].__doc__))
else:
# do the search again to get the error handling in case of multi-match
obj = caller.search(oquery)
if not obj:
return
caller.msg("{}'s current typeclass is '{}.{}'".format(
obj.name, obj.__class__.__module__, obj.__class__.__name__))
return
# get object to swap on
obj = caller.search(self.lhs)
if not obj:
return
if not hasattr(obj, "__dbclass__"):
string = "%s is not a typed object." % obj.name
caller.msg(string)
return
new_typeclass = self.rhs or obj.path
if "show" in self.switches or "examine" in self.switches:
string = "%s's current typeclass is %s." % (obj.name, obj.__class__)
caller.msg(string)
return
if self.cmdstring == "@swap":
self.switches.append("force")
self.switches.append("reset")
elif self.cmdstring == "@update":
self.switches.append("force")
self.switches.append("update")
if not (obj.access(caller, "control") or obj.access(caller, 'edit')):
caller.msg("You are not allowed to do that.")
return
if not hasattr(obj, 'swap_typeclass'):
caller.msg("This object cannot have a type at all!")
return
is_same = obj.is_typeclass(new_typeclass, exact=True)
if is_same and 'force' not in self.switches:
string = "%s already has the typeclass '%s'. Use /force to override." % (obj.name, new_typeclass)
else:
update = "update" in self.switches
reset = "reset" in self.switches
hooks = "at_object_creation" if update else "all"
old_typeclass_path = obj.typeclass_path
# we let this raise exception if needed
obj.swap_typeclass(new_typeclass, clean_attributes=reset,
clean_cmdsets=reset, run_start_hooks=hooks)
if is_same:
string = "%s updated its existing typeclass (%s).\n" % (obj.name, obj.path)
else:
string = "%s changed typeclass from %s to %s.\n" % (obj.name,
old_typeclass_path,
obj.typeclass_path)
if update:
string += "Only the at_object_creation hook was run (update mode)."
else:
string += "All object creation hooks were run."
if reset:
string += " All old attributes where deleted before the swap."
else:
string += " Attributes set before swap were not removed."
caller.msg(string)
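# Rough summary of the switch handling above (typeclass paths are hypothetical):
#
#   @type button = examples.red_button.RedButton  # swap typeclass, run all hooks
#   @swap button = examples.red_button.RedButton  # adds /force/reset: clears old
#                                                 #   attributes/cmdsets first
#   @update button                                # adds /force/update: re-runs
#                                                 #   only at_object_creation()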
class CmdWipe(ObjManipCommand):
"""
clear all attributes from an object
Usage:
@wipe <object>[/<attr>[/<attr>...]]
Example:
@wipe box
@wipe box/colour
Wipes all of an object's attributes, or optionally only those
matching the given attribute-wildcard search string.
"""
key = "@wipe"
locks = "cmd:perm(wipe) or perm(Builder)"
help_category = "Building"
def func(self):
"""
inp is the dict produced in ObjManipCommand.parse()
"""
caller = self.caller
if not self.args:
caller.msg("Usage: @wipe <object>[/<attr>/<attr>...]")
return
# get the attributes set by our custom parser
objname = self.lhs_objattr[0]['name']
attrs = self.lhs_objattr[0]['attrs']
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, 'edit')):
caller.msg("You are not allowed to do that.")
return
if not attrs:
# wipe everything
obj.attributes.clear()
string = "Wiped all attributes on %s." % obj.name
else:
for attrname in attrs:
obj.attributes.remove(attrname)
string = "Wiped attributes %s on %s."
string = string % (",".join(attrs), obj.name)
caller.msg(string)
class CmdLock(ObjManipCommand):
"""
assign a lock definition to an object
Usage:
@lock <object or *account>[ = <lockstring>]
or
@lock[/switch] <object or *account>/<access_type>
Switch:
del - delete given access type
view - view lock associated with given access type (default)
If no lockstring is given, shows all locks on
object.
Lockstring is of the form
access_type:[NOT] func1(args)[ AND|OR][ NOT] func2(args) ...]
Where func1, func2 ... valid lockfuncs with or without arguments.
Separator expressions need not be capitalized.
For example:
'get: id(25) or perm(Admin)'
The 'get' lock access_type is checked e.g. by the 'get' command.
An object locked with this example lock will only be possible to pick up
by Admins or by an object with id=25.
You can add several access_types after one another by separating
them by ';', i.e:
'get:id(25); delete:perm(Builder)'
"""
key = "@lock"
aliases = ["@locks"]
locks = "cmd: perm(locks) or perm(Builder)"
help_category = "Building"
def func(self):
"""Sets up the command"""
caller = self.caller
if not self.args:
string = "@lock <object>[ = <lockstring>] or @lock[/switch] " \
"<object>/<access_type>"
caller.msg(string)
return
if '/' in self.lhs:
# call of the form @lock obj/access_type
objname, access_type = [p.strip() for p in self.lhs.split('/', 1)]
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip('*'))
if not obj:
obj = caller.search(objname)
if not obj:
return
has_control_access = obj.access(caller, 'control')
if access_type == 'control' and not has_control_access:
# only allow to change 'control' access if you have 'control' access already
caller.msg("You need 'control' access to change this type of lock.")
return
if not (has_control_access or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
lockdef = obj.locks.get(access_type)
if lockdef:
if 'del' in self.switches:
obj.locks.delete(access_type)
string = "deleted lock %s" % lockdef
else:
string = lockdef
else:
string = "%s has no lock of access type '%s'." % (obj, access_type)
caller.msg(string)
return
if self.rhs:
# we have a = separator, so we are assigning a new lock
if self.switches:
swi = ", ".join(self.switches)
caller.msg("Switch(es) |w%s|n can not be used with a "
"lock assignment. Use e.g. "
"|w@lock/del objname/locktype|n instead." % swi)
return
objname, lockdef = self.lhs, self.rhs
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip('*'))
if not obj:
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, 'control') or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
ok = False
lockdef = re.sub(r"\'|\"", "", lockdef)
try:
ok = obj.locks.add(lockdef)
except LockException as e:
caller.msg(str(e))
if "cmd" in lockdef.lower() and \
inherits_from(obj, "evennia.objects.objects.DefaultExit"):
# special fix to update Exits since "cmd"-type locks won't
# update on them unless their cmdsets are rebuilt.
obj.at_init()
if ok:
caller.msg("Added lock '%s' to %s." % (lockdef, obj))
return
# if we get here, we are just viewing all locks on obj
obj = None
if self.lhs.startswith("*"):
obj = caller.search_account(self.lhs.lstrip("*"))
if not obj:
obj = caller.search(self.lhs)
if not obj:
return
if not (obj.access(caller, 'control') or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
caller.msg("\n".join(obj.locks.all()))
class CmdExamine(ObjManipCommand):
"""
get detailed information about an object
Usage:
examine [<object>[/attrname]]
examine [*<account>[/attrname]]
Switch:
account - examine an Account (same as adding *)
object - examine an Object (useful when OOC)
The examine command shows detailed game info about an
object and optionally a specific attribute on it.
If object is not specified, the current location is examined.
Append a * before the search string to examine an account.
"""
key = "@examine"
aliases = ["@ex", "exam"]
locks = "cmd:perm(examine) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
account_mode = False
def list_attribute(self, crop, attr, value):
"""
Formats a single attribute line.
"""
if crop:
if not isinstance(value, basestring):
value = utils.to_str(value, force_string=True)
value = utils.crop(value)
value = utils.to_unicode(value)
string = "\n %s = %s" % (attr, value)
string = raw(string)
return string
def format_attributes(self, obj, attrname=None, crop=True):
"""
Helper function that returns info about attributes and/or
non-persistent data stored on object
"""
if attrname:
db_attr = [(attrname, obj.attributes.get(attrname))]
try:
ndb_attr = [(attrname, object.__getattribute__(obj.ndb, attrname))]
except Exception:
ndb_attr = None
else:
db_attr = [(attr.key, attr.value) for attr in obj.db_attributes.all()]
try:
ndb_attr = obj.nattributes.all(return_tuples=True)
except Exception:
ndb_attr = None
string = ""
if db_attr and db_attr[0]:
string += "\n|wPersistent attributes|n:"
for attr, value in db_attr:
string += self.list_attribute(crop, attr, value)
if ndb_attr and ndb_attr[0]:
string += "\n|wNon-Persistent attributes|n:"
for attr, value in ndb_attr:
string += self.list_attribute(crop, attr, value)
return string
def format_output(self, obj, avail_cmdset):
"""
Helper function that creates a nice report about an object.
returns a string.
"""
string = "\n|wName/key|n: |c%s|n (%s)" % (obj.name, obj.dbref)
if hasattr(obj, "aliases") and obj.aliases.all():
string += "\n|wAliases|n: %s" % (", ".join(utils.make_iter(str(obj.aliases))))
if hasattr(obj, "sessions") and obj.sessions.all():
string += "\n|wSession id(s)|n: %s" % (", ".join("#%i" % sess.sessid
for sess in obj.sessions.all()))
if hasattr(obj, "email") and obj.email:
string += "\n|wEmail|n: |c%s|n" % obj.email
if hasattr(obj, "has_account") and obj.has_account:
string += "\n|wAccount|n: |c%s|n" % obj.account.name
perms = obj.account.permissions.all()
if obj.account.is_superuser:
perms = ["<Superuser>"]
elif not perms:
perms = ["<None>"]
string += "\n|wAccount Perms|n: %s" % (", ".join(perms))
if obj.account.attributes.has("_quell"):
string += " |r(quelled)|n"
string += "\n|wTypeclass|n: %s (%s)" % (obj.typename,
obj.typeclass_path)
if hasattr(obj, "location"):
string += "\n|wLocation|n: %s" % obj.location
if obj.location:
string += " (#%s)" % obj.location.id
if hasattr(obj, "home"):
string += "\n|wHome|n: %s" % obj.home
if obj.home:
string += " (#%s)" % obj.home.id
if hasattr(obj, "destination") and obj.destination:
string += "\n|wDestination|n: %s" % obj.destination
if obj.destination:
string += " (#%s)" % obj.destination.id
perms = obj.permissions.all()
if perms:
perms_string = (", ".join(perms))
else:
perms_string = "<None>"
if obj.is_superuser:
perms_string += " [Superuser]"
string += "\n|wPermissions|n: %s" % perms_string
locks = str(obj.locks)
if locks:
locks_string = utils.fill("; ".join([lock for lock in locks.split(';')]), indent=6)
else:
locks_string = " Default"
string += "\n|wLocks|n:%s" % locks_string
if not (len(obj.cmdset.all()) == 1 and obj.cmdset.current.key == "_EMPTY_CMDSET"):
# all() returns a 'stack', so make a copy to sort.
stored_cmdsets = sorted(obj.cmdset.all(), key=lambda x: x.priority, reverse=True)
string += "\n|wStored Cmdset(s)|n:\n %s" % ("\n ".join("%s [%s] (%s, prio %s)" % (
cmdset.path, cmdset.key, cmdset.mergetype, cmdset.priority) for cmdset in stored_cmdsets
if cmdset.key != "_EMPTY_CMDSET"))
# this gets all components of the currently merged set
all_cmdsets = [(cmdset.key, cmdset) for cmdset in avail_cmdset.merged_from]
# we always at least try to add account- and session sets since these are ignored
# if we merge on the object level.
if hasattr(obj, "account") and obj.account:
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.cmdset.all()])
if obj.sessions.count():
# if there are more sessions than one on objects it's because of multisession mode 3.
# we only show the first session's cmdset here (it is -in principle- possible that
# different sessions have different cmdsets but for admins who want such madness
# it is better that they overload with their own CmdExamine to handle it).
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.sessions.all()[0].cmdset.all()])
else:
try:
# we have to protect this since many objects don't have sessions.
all_cmdsets.extend([(cmdset.key, cmdset)
for cmdset in obj.get_session(obj.sessions.get()).cmdset.all()])
except (TypeError, AttributeError):
# an error means we are merging an object without a session
pass
all_cmdsets = [cmdset for cmdset in dict(all_cmdsets).values()]
all_cmdsets.sort(key=lambda x: x.priority, reverse=True)
string += "\n|wMerged Cmdset(s)|n:\n %s" % ("\n ".join("%s [%s] (%s, prio %s)" % (
cmdset.path, cmdset.key, cmdset.mergetype, cmdset.priority) for cmdset in all_cmdsets))
# list the commands available to this object
avail_cmdset = sorted([cmd.key for cmd in avail_cmdset
if cmd.access(obj, "cmd")])
cmdsetstr = utils.fill(", ".join(avail_cmdset), indent=2)
string += "\n|wCommands available to %s (result of Merged CmdSets)|n:\n %s" % (obj.key, cmdsetstr)
if hasattr(obj, "scripts") and hasattr(obj.scripts, "all") and obj.scripts.all():
string += "\n|wScripts|n:\n %s" % obj.scripts
# add the attributes
string += self.format_attributes(obj)
# display Tags
tags_string = utils.fill(", ".join("%s[%s]" % (tag, category)
for tag, category in obj.tags.all(return_key_and_category=True)), indent=5)
if tags_string:
string += "\n|wTags[category]|n: %s" % tags_string.strip()
# add the contents
exits = []
pobjs = []
things = []
if hasattr(obj, "contents"):
for content in obj.contents:
if content.destination:
exits.append(content)
elif content.account:
pobjs.append(content)
else:
things.append(content)
if exits:
string += "\n|wExits|n: %s" % ", ".join(
["%s(%s)" % (exit.name, exit.dbref) for exit in exits])
if pobjs:
string += "\n|wCharacters|n: %s" % ", ".join(
["|c%s|n(%s)" % (pobj.name, pobj.dbref) for pobj in pobjs])
if things:
string += "\n|wContents|n: %s" % ", ".join(
["%s(%s)" % (cont.name, cont.dbref) for cont in obj.contents
if cont not in exits and cont not in pobjs])
separator = "-" * _DEFAULT_WIDTH
# output info
return '%s\n%s\n%s' % (separator, string.strip(), separator)
def func(self):
"""Process command"""
caller = self.caller
def get_cmdset_callback(cmdset):
"""
            We make use of the cmdhandler.get_and_merge_cmdsets below. This
            is an asynchronous function, returning a Twisted deferred.
            So in order to properly use this we need to use this callback;
it is called with the result of get_and_merge_cmdsets, whenever
that function finishes. Taking the resulting cmdset, we continue
to format and output the result.
"""
string = self.format_output(obj, cmdset)
self.msg(string.strip())
if not self.args:
# If no arguments are provided, examine the invoker's location.
if hasattr(caller, "location"):
obj = caller.location
if not obj.access(caller, 'examine'):
# If we don't have special info access, just look at the object instead.
self.msg(caller.at_look(obj))
return
# using callback for printing result whenever function returns.
get_and_merge_cmdsets(obj, self.session, self.account, obj, "object",
self.raw_string).addCallback(get_cmdset_callback)
else:
self.msg("You need to supply a target to examine.")
return
# we have given a specific target object
for objdef in self.lhs_objattr:
obj = None
obj_name = objdef['name']
obj_attrs = objdef['attrs']
self.account_mode = utils.inherits_from(caller, "evennia.accounts.accounts.DefaultAccount") or \
"account" in self.switches or obj_name.startswith('*')
if self.account_mode:
try:
obj = caller.search_account(obj_name.lstrip('*'))
except AttributeError:
# this means we are calling examine from an account object
obj = caller.search(obj_name.lstrip('*'), search_object='object' in self.switches)
else:
obj = caller.search(obj_name)
if not obj:
continue
if not obj.access(caller, 'examine'):
# If we don't have special info access, just look
# at the object instead.
self.msg(caller.at_look(obj))
continue
if obj_attrs:
for attrname in obj_attrs:
# we are only interested in specific attributes
caller.msg(self.format_attributes(obj, attrname, crop=False))
else:
if obj.sessions.count():
mergemode = "session"
elif self.account_mode:
mergemode = "account"
else:
mergemode = "object"
# using callback to print results whenever function returns.
get_and_merge_cmdsets(obj, self.session, self.account, obj, mergemode, self.raw_string).addCallback(get_cmdset_callback)
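# Note on the flow above: get_and_merge_cmdsets() returns a Twisted Deferred,
# so the report is produced inside get_cmdset_callback() once the merged cmdset
# is available rather than synchronously. Roughly:
#
#   deferred = get_and_merge_cmdsets(obj, session, account, obj, mergemode, raw)
#   deferred.addCallback(get_cmdset_callback)  # -> format_output(obj, cmdset)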
class CmdFind(COMMAND_DEFAULT_CLASS):
"""
search the database for objects
Usage:
@find[/switches] <name or dbref or *account> [= dbrefmin[-dbrefmax]]
@locate - this is a shorthand for using the /loc switch.
Switches:
room - only look for rooms (location=None)
exit - only look for exits (destination!=None)
char - only look for characters (BASE_CHARACTER_TYPECLASS)
exact - only exact matches are returned.
loc - display object location if exists and match has one result
startswith - search for names starting with the string, rather than containing
Searches the database for an object of a particular name or exact #dbref.
    Use *accountname to search for an account. The switches allow for
limiting object matches to certain game entities. Dbrefmin and dbrefmax
limits matches to within the given dbrefs range, or above/below if only
one is given.
"""
key = "@find"
aliases = "@search, @locate"
switch_options = ("room", "exit", "char", "exact", "loc", "startswith")
locks = "cmd:perm(find) or perm(Builder)"
help_category = "Building"
def func(self):
"""Search functionality"""
caller = self.caller
switches = self.switches
if not self.args:
caller.msg("Usage: @find <string> [= low [-high]]")
return
if "locate" in self.cmdstring: # Use option /loc as a default for @locate command alias
switches.append('loc')
searchstring = self.lhs
low, high = 1, ObjectDB.objects.all().order_by("-id")[0].id
if self.rhs:
if "-" in self.rhs:
# also support low-high syntax
limlist = [part.lstrip("#").strip() for part in self.rhs.split("-", 1)]
else:
# otherwise split by space
limlist = [part.lstrip("#") for part in self.rhs.split(None, 1)]
if limlist and limlist[0].isdigit():
low = max(low, int(limlist[0]))
if len(limlist) > 1 and limlist[1].isdigit():
high = min(high, int(limlist[1]))
low = min(low, high)
high = max(low, high)
is_dbref = utils.dbref(searchstring)
is_account = searchstring.startswith("*")
restrictions = ""
if self.switches:
restrictions = ", %s" % (", ".join(self.switches))
if is_dbref or is_account:
if is_dbref:
# a dbref search
result = caller.search(searchstring, global_search=True, quiet=True)
string = "|wExact dbref match|n(#%i-#%i%s):" % (low, high, restrictions)
else:
# an account search
searchstring = searchstring.lstrip("*")
result = caller.search_account(searchstring, quiet=True)
string = "|wMatch|n(#%i-#%i%s):" % (low, high, restrictions)
if "room" in switches:
result = result if inherits_from(result, ROOM_TYPECLASS) else None
if "exit" in switches:
result = result if inherits_from(result, EXIT_TYPECLASS) else None
if "char" in switches:
result = result if inherits_from(result, CHAR_TYPECLASS) else None
if not result:
string += "\n |RNo match found.|n"
elif not low <= int(result[0].id) <= high:
string += "\n |RNo match found for '%s' in #dbref interval.|n" % searchstring
else:
result = result[0]
string += "\n|g %s - %s|n" % (result.get_display_name(caller), result.path)
if "loc" in self.switches and not is_account and result.location:
string += " (|wlocation|n: |g{}|n)".format(result.location.get_display_name(caller))
else:
# Not an account/dbref search but a wider search; build a queryset.
            # Searches for key and aliases
if "exact" in switches:
keyquery = Q(db_key__iexact=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(db_tags__db_key__iexact=searchstring,
db_tags__db_tagtype__iexact="alias", id__gte=low, id__lte=high)
elif "startswith" in switches:
keyquery = Q(db_key__istartswith=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(db_tags__db_key__istartswith=searchstring,
db_tags__db_tagtype__iexact="alias", id__gte=low, id__lte=high)
else:
keyquery = Q(db_key__icontains=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(db_tags__db_key__icontains=searchstring,
db_tags__db_tagtype__iexact="alias", id__gte=low, id__lte=high)
results = ObjectDB.objects.filter(keyquery | aliasquery).distinct()
nresults = results.count()
if nresults:
# convert result to typeclasses.
results = [result for result in results]
if "room" in switches:
results = [obj for obj in results if inherits_from(obj, ROOM_TYPECLASS)]
if "exit" in switches:
results = [obj for obj in results if inherits_from(obj, EXIT_TYPECLASS)]
if "char" in switches:
results = [obj for obj in results if inherits_from(obj, CHAR_TYPECLASS)]
nresults = len(results)
# still results after type filtering?
if nresults:
if nresults > 1:
string = "|w%i Matches|n(#%i-#%i%s):" % (nresults, low, high, restrictions)
for res in results:
string += "\n |g%s - %s|n" % (res.get_display_name(caller), res.path)
else:
string = "|wOne Match|n(#%i-#%i%s):" % (low, high, restrictions)
string += "\n |g%s - %s|n" % (results[0].get_display_name(caller), results[0].path)
if "loc" in self.switches and nresults == 1 and results[0].location:
string += " (|wlocation|n: |g{}|n)".format(results[0].location.get_display_name(caller))
else:
string = "|wMatch|n(#%i-#%i%s):" % (low, high, restrictions)
string += "\n |RNo matches found for '%s'|n" % searchstring
# send result
caller.msg(string.strip())
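# Sketch of the wider (non-dbref) search branch above: key and alias matches
# are combined with Django Q objects and limited to the requested dbref range.
# A hypothetical `@find/room hall = 100-200` roughly corresponds to
#
#   ObjectDB.objects.filter(
#       Q(db_key__icontains="hall", id__gte=100, id__lte=200)
#       | Q(db_tags__db_key__icontains="hall",
#           db_tags__db_tagtype__iexact="alias", id__gte=100, id__lte=200)
#   ).distinct()
#
# followed by filtering the results on the room typeclass.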
class CmdTeleport(COMMAND_DEFAULT_CLASS):
"""
teleport object to another location
Usage:
@tel/switch [<object> to||=] <target location>
Examples:
@tel Limbo
@tel/quiet box = Limbo
@tel/tonone box
Switches:
quiet - don't echo leave/arrive messages to the source/target
locations for the move.
intoexit - if target is an exit, teleport INTO
the exit object instead of to its destination
tonone - if set, teleport the object to a None-location. If this
switch is set, <target location> is ignored.
Note that the only way to retrieve
an object from a None location is by direct #dbref
reference. A puppeted object cannot be moved to None.
loc - teleport object to the target's location instead of its contents
Teleports an object somewhere. If no object is given, you yourself
    are teleported to the target location.
"""
key = "@tel"
aliases = "@teleport"
switch_options = ("quiet", "intoexit", "tonone", "loc")
rhs_split = ("=", " to ") # Prefer = delimiter, but allow " to " usage.
locks = "cmd:perm(teleport) or perm(Builder)"
help_category = "Building"
def func(self):
"""Performs the teleport"""
caller = self.caller
args = self.args
lhs, rhs = self.lhs, self.rhs
switches = self.switches
# setting switches
tel_quietly = "quiet" in switches
to_none = "tonone" in switches
to_loc = "loc" in switches
if to_none:
# teleporting to None
if not args:
obj_to_teleport = caller
else:
obj_to_teleport = caller.search(lhs, global_search=True)
if not obj_to_teleport:
caller.msg("Did not find object to teleport.")
return
if obj_to_teleport.has_account:
caller.msg("Cannot teleport a puppeted object "
"(%s, puppeted by %s) to a None-location." % (
obj_to_teleport.key, obj_to_teleport.account))
return
caller.msg("Teleported %s -> None-location." % obj_to_teleport)
if obj_to_teleport.location and not tel_quietly:
obj_to_teleport.location.msg_contents("%s teleported %s into nothingness."
% (caller, obj_to_teleport),
exclude=caller)
obj_to_teleport.location = None
return
# not teleporting to None location
if not args and not to_none:
caller.msg("Usage: teleport[/switches] [<obj> =] <target_loc>||home")
return
if rhs:
obj_to_teleport = caller.search(lhs, global_search=True)
destination = caller.search(rhs, global_search=True)
else:
obj_to_teleport = caller
destination = caller.search(lhs, global_search=True)
if not obj_to_teleport:
caller.msg("Did not find object to teleport.")
return
if not destination:
caller.msg("Destination not found.")
return
if to_loc:
destination = destination.location
if not destination:
caller.msg("Destination has no location.")
return
if obj_to_teleport == destination:
caller.msg("You can't teleport an object inside of itself!")
return
if obj_to_teleport == destination.location:
caller.msg("You can't teleport an object inside something it holds!")
return
if obj_to_teleport.location and obj_to_teleport.location == destination:
caller.msg("%s is already at %s." % (obj_to_teleport, destination))
return
use_destination = True
if "intoexit" in self.switches:
use_destination = False
# try the teleport
if obj_to_teleport.move_to(destination, quiet=tel_quietly,
emit_to_obj=caller,
use_destination=use_destination):
if obj_to_teleport == caller:
caller.msg("Teleported to %s." % destination)
else:
caller.msg("Teleported %s -> %s." % (obj_to_teleport,
destination))
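# Illustrative @tel usage (object and room names are hypothetical):
#
#   > @tel Limbo              # teleport yourself to Limbo
#   > @tel/quiet box = Limbo  # move box without leave/arrive messages
#   > @tel/tonone box         # strand box in a None-location
#   > @tel/loc box = bob      # send box to bob's location, not inside bob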
class CmdScript(COMMAND_DEFAULT_CLASS):
"""
attach a script to an object
Usage:
@script[/switch] <obj> [= script_path or <scriptkey>]
Switches:
start - start all non-running scripts on object, or a given script only
stop - stop all scripts on objects, or a given script only
If no script path/key is given, lists all scripts active on the given
object.
Script path can be given from the base location for scripts as given in
settings. If adding a new script, it will be started automatically
(no /start switch is needed). Using the /start or /stop switches on an
object without specifying a script key/path will start/stop ALL scripts on
the object.
"""
key = "@script"
aliases = "@addscript"
switch_options = ("start", "stop")
locks = "cmd:perm(script) or perm(Builder)"
help_category = "Building"
def func(self):
"""Do stuff"""
caller = self.caller
if not self.args:
string = "Usage: @script[/switch] <obj> [= script_path or <script key>]"
caller.msg(string)
return
if not self.lhs:
caller.msg("To create a global script you need |w@scripts/add <typeclass>|n.")
return
obj = caller.search(self.lhs)
if not obj:
return
result = []
if not self.rhs:
# no rhs means we want to operate on all scripts
scripts = obj.scripts.all()
if not scripts:
result.append("No scripts defined on %s." % obj.get_display_name(caller))
elif not self.switches:
# view all scripts
from evennia.commands.default.system import format_script_list
result.append(format_script_list(scripts))
elif "start" in self.switches:
num = sum([obj.scripts.start(script.key) for script in scripts])
result.append("%s scripts started on %s." % (num, obj.get_display_name(caller)))
elif "stop" in self.switches:
for script in scripts:
result.append("Stopping script %s on %s." % (script.get_display_name(caller),
obj.get_display_name(caller)))
script.stop()
obj.scripts.validate()
else: # rhs exists
if not self.switches:
# adding a new script, and starting it
ok = obj.scripts.add(self.rhs, autostart=True)
if not ok:
result.append("\nScript %s could not be added and/or started on %s." % (
self.rhs, obj.get_display_name(caller)))
else:
result.append("Script |w%s|n successfully added and started on %s." % (
self.rhs, obj.get_display_name(caller)))
else:
paths = [self.rhs] + ["%s.%s" % (prefix, self.rhs)
for prefix in settings.TYPECLASS_PATHS]
if "stop" in self.switches:
# we are stopping an already existing script
for path in paths:
ok = obj.scripts.stop(path)
if not ok:
result.append("\nScript %s could not be stopped. Does it exist?" % path)
else:
result = ["Script stopped and removed from object."]
break
if "start" in self.switches:
# we are starting an already existing script
for path in paths:
ok = obj.scripts.start(path)
if not ok:
result.append("\nScript %s could not be (re)started." % path)
else:
result = ["Script started successfully."]
break
caller.msg("".join(result).strip())
class CmdTag(COMMAND_DEFAULT_CLASS):
"""
handles the tags of an object
Usage:
@tag[/del] <obj> [= <tag>[:<category>]]
      @tag/search <tag>[:<category>]
Switches:
search - return all objects with a given Tag
del - remove the given tag. If no tag is specified,
clear all tags on object.
Manipulates and lists tags on objects. Tags allow for quick
grouping of and searching for objects. If only <obj> is given,
list all tags on the object. If /search is used, list objects
with the given tag.
The category can be used for grouping tags themselves, but it
    should be used with restraint - tags on their own are usually
    enough for most grouping schemes.
"""
key = "@tag"
aliases = ["@tags"]
options = ("search", "del")
locks = "cmd:perm(tag) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
def func(self):
"""Implement the @tag functionality"""
if not self.args:
self.caller.msg("Usage: @tag[/switches] <obj> [= <tag>[:<category>]]")
return
if "search" in self.switches:
# search by tag
tag = self.args
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
objs = search.search_tag(tag, category=category)
nobjs = len(objs)
if nobjs > 0:
catstr = " (category: '|w%s|n')" % category if category else \
("" if nobjs == 1 else " (may have different tag categories)")
matchstr = ", ".join(o.get_display_name(self.caller) for o in objs)
string = "Found |w%i|n object%s with tag '|w%s|n'%s:\n %s" % (nobjs,
"s" if nobjs > 1 else "",
tag,
catstr, matchstr)
else:
string = "No objects found with tag '%s%s'." % (tag,
" (category: %s)" % category if category else "")
self.caller.msg(string)
return
if "del" in self.switches:
# remove one or all tags
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if self.rhs:
# remove individual tag
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
if obj.tags.get(tag, category=category):
obj.tags.remove(tag, category=category)
string = "Removed tag '%s'%s from %s." % (
tag,
" (category: %s)" % category if category else "",
obj)
else:
string = "No tag '%s'%s to delete on %s." % (
tag,
" (category: %s)" % category if category else "",
obj)
else:
# no tag specified, clear all tags
old_tags = ["%s%s" % (tag, " (category: %s" % category if category else "")
for tag, category in obj.tags.all(return_key_and_category=True)]
if old_tags:
obj.tags.clear()
string = "Cleared all tags from %s: %s" % (obj, ", ".join(old_tags))
else:
string = "No Tags to clear on %s." % obj
self.caller.msg(string)
return
# no search/deletion
if self.rhs:
# = is found; command args are of the form obj = tag
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
# create the tag
obj.tags.add(tag, category=category)
string = "Added tag '%s'%s to %s." % (tag,
" (category: %s)" % category if category else "",
obj)
self.caller.msg(string)
else:
# no = found - list tags on object
obj = self.caller.search(self.args, global_search=True)
if not obj:
return
tagtuples = obj.tags.all(return_key_and_category=True)
ntags = len(tagtuples)
tags = [tup[0] for tup in tagtuples]
categories = [" (category: %s)" % tup[1] if tup[1] else "" for tup in tagtuples]
if ntags:
string = "Tag%s on %s: %s" % ("s" if ntags > 1 else "", obj,
", ".join("'%s'%s" % (tags[i], categories[i]) for i in range(ntags)))
else:
string = "No tags attached to %s." % obj
self.caller.msg(string)
class CmdSpawn(COMMAND_DEFAULT_CLASS):
"""
spawn objects from prototype
Usage:
@spawn[/noloc] <prototype_key>
@spawn[/noloc] <prototype_dict>
      @spawn/search [prototype_key][;tag[,tag]]
@spawn/list [tag, tag, ...]
@spawn/show [<prototype_key>]
@spawn/update <prototype_key>
@spawn/save <prototype_dict>
@spawn/edit [<prototype_key>]
@olc - equivalent to @spawn/edit
Switches:
noloc - allow location to be None if not specified explicitly. Otherwise,
location will default to caller's current location.
search - search prototype by name or tags.
list - list available prototypes, optionally limit by tags.
show, examine - inspect prototype by key. If not given, acts like list.
save - save a prototype to the database. It will be listable by /list.
delete - remove a prototype from database, if allowed to.
update - find existing objects with the same prototype_key and update
them with latest version of given prototype. If given with /save,
will auto-update all objects with the old version of the prototype
without asking first.
edit, olc - create/manipulate prototype in a menu interface.
Example:
@spawn GOBLIN
@spawn {"key":"goblin", "typeclass":"monster.Monster", "location":"#2"}
@spawn/save {"key": "grunt", prototype: "goblin"};;mobs;edit:all()
Dictionary keys:
|wprototype_parent |n - name of parent prototype to use. Required if typeclass is
not set. Can be a path or a list for multiple inheritance (inherits
            left to right). If set, one of the parents must have a typeclass.
|wtypeclass |n - string. Required if prototype_parent is not set.
|wkey |n - string, the main object identifier
|wlocation |n - this should be a valid object or #dbref
|whome |n - valid object or #dbref
|wdestination|n - only valid for exits (object or dbref)
|wpermissions|n - string or list of permission strings
|wlocks |n - a lock-string
|waliases |n - string or list of strings.
|wndb_|n<name> - value of a nattribute (ndb_ is stripped)
|wprototype_key|n - name of this prototype. Unique. Used to store/retrieve from db
and update existing prototyped objects if desired.
|wprototype_desc|n - desc of this prototype. Used in listings
|wprototype_locks|n - locks of this prototype. Limits who may use prototype
|wprototype_tags|n - tags of this prototype. Used to find prototype
any other keywords are interpreted as Attributes and their values.
The available prototypes are defined globally in modules set in
settings.PROTOTYPE_MODULES. If @spawn is used without arguments it
displays a list of available prototypes.
"""
key = "@spawn"
aliases = ["olc"]
switch_options = ("noloc", "search", "list", "show", "examine", "save", "delete", "menu", "olc", "update", "edit")
locks = "cmd:perm(spawn) or perm(Builder)"
help_category = "Building"
def func(self):
"""Implements the spawner"""
def _parse_prototype(inp, expect=dict):
err = None
try:
prototype = _LITERAL_EVAL(inp)
except (SyntaxError, ValueError) as err:
# treat as string
prototype = utils.to_str(inp)
finally:
if not isinstance(prototype, expect):
if err:
string = ("{}\n|RCritical Python syntax error in argument. Only primitive "
"Python structures are allowed. \nYou also need to use correct "
"Python syntax. Remember especially to put quotes around all "
"strings inside lists and dicts.|n For more advanced uses, embed "
"inline functions in the strings.".format(err))
else:
string = "Expected {}, got {}.".format(expect, type(prototype))
self.caller.msg(string)
return None
if expect == dict:
# an actual prototype. We need to make sure it's safe. Don't allow exec
if "exec" in prototype and not self.caller.check_permstring("Developer"):
self.caller.msg("Spawn aborted: You are not allowed to "
"use the 'exec' prototype key.")
return None
try:
# we homogenize first, to be more lenient
protlib.validate_prototype(protlib.homogenize_prototype(prototype))
except RuntimeError as err:
self.caller.msg(str(err))
return
return prototype
def _search_show_prototype(query, prototypes=None):
# prototype detail
if not prototypes:
prototypes = protlib.search_prototype(key=query)
if prototypes:
return "\n".join(protlib.prototype_to_str(prot) for prot in prototypes)
else:
return False
caller = self.caller
if self.cmdstring == "olc" or 'menu' in self.switches \
or 'olc' in self.switches or 'edit' in self.switches:
# OLC menu mode
prototype = None
if self.lhs:
key = self.lhs
prototype = protlib.search_prototype(key=key)
if len(prototype) > 1:
caller.msg("More than one match for {}:\n{}".format(
key, "\n".join(proto.get('prototype_key', '') for proto in prototype)))
return
elif prototype:
# one match
prototype = prototype[0]
else:
# no match
caller.msg("No prototype '{}' was found.".format(key))
return
olc_menus.start_olc(caller, session=self.session, prototype=prototype)
return
if 'search' in self.switches:
# query for a key match
if not self.args:
self.switches.append("list")
else:
key, tags = self.args.strip(), None
if ';' in self.args:
key, tags = (part.strip().lower() for part in self.args.split(";", 1))
tags = [tag.strip() for tag in tags.split(",")] if tags else None
EvMore(caller, unicode(protlib.list_prototypes(caller, key=key, tags=tags)),
exit_on_lastpage=True)
return
if 'show' in self.switches or 'examine' in self.switches:
# the argument is a key in this case (may be a partial key)
if not self.args:
self.switches.append('list')
else:
matchstring = _search_show_prototype(self.args)
if matchstring:
caller.msg(matchstring)
else:
caller.msg("No prototype '{}' was found.".format(self.args))
return
if 'list' in self.switches:
# for list, all optional arguments are tags
# import pudb; pudb.set_trace()
EvMore(caller, unicode(protlib.list_prototypes(caller,
tags=self.lhslist)), exit_on_lastpage=True)
return
if 'save' in self.switches:
# store a prototype to the database store
if not self.args:
caller.msg(
"Usage: @spawn/save <key>[;desc[;tag,tag[,...][;lockstring]]] = <prototype_dict>")
return
# handle rhs:
prototype = _parse_prototype(self.lhs.strip())
if not prototype:
return
# present prototype to save
new_matchstring = _search_show_prototype("", prototypes=[prototype])
string = "|yCreating new prototype:|n\n{}".format(new_matchstring)
question = "\nDo you want to continue saving? [Y]/N"
prototype_key = prototype.get("prototype_key")
if not prototype_key:
caller.msg("\n|yTo save a prototype it must have the 'prototype_key' set.")
return
# check for existing prototype,
old_matchstring = _search_show_prototype(prototype_key)
if old_matchstring:
string += "\n|yExisting saved prototype found:|n\n{}".format(old_matchstring)
question = "\n|yDo you want to replace the existing prototype?|n [Y]/N"
answer = yield(string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rSave cancelled.|n")
return
# all seems ok. Try to save.
try:
prot = protlib.save_prototype(**prototype)
if not prot:
caller.msg("|rError saving:|R {}.|n".format(prototype_key))
return
except protlib.PermissionError as err:
caller.msg("|rError saving:|R {}|n".format(err))
return
caller.msg("|gSaved prototype:|n {}".format(prototype_key))
# check if we want to update existing objects
existing_objects = protlib.search_objects_with_prototype(prototype_key)
if existing_objects:
if 'update' not in self.switches:
n_existing = len(existing_objects)
slow = " (note that this may be slow)" if n_existing > 10 else ""
string = ("There are {} objects already created with an older version "
"of prototype {}. Should it be re-applied to them{}? [Y]/N".format(
n_existing, prototype_key, slow))
answer = yield(string)
if answer.lower() in ["n", "no"]:
caller.msg("|rNo update was done of existing objects. "
"Use @spawn/update <key> to apply later as needed.|n")
return
                n_updated = spawner.batch_update_objects_with_prototype(existing_objects, prototype_key)
caller.msg("{} objects were updated.".format(n_updated))
return
if not self.args:
ncount = len(protlib.search_prototype())
caller.msg("Usage: @spawn <prototype-key> or {{key: value, ...}}"
"\n ({} existing prototypes. Use /list to inspect)".format(ncount))
return
if 'delete' in self.switches:
# remove db-based prototype
matchstring = _search_show_prototype(self.args)
if matchstring:
string = "|rDeleting prototype:|n\n{}".format(matchstring)
question = "\nDo you want to continue deleting? [Y]/N"
answer = yield(string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rDeletion cancelled.|n")
return
try:
success = protlib.delete_prototype(self.args)
except protlib.PermissionError as err:
caller.msg("|rError deleting:|R {}|n".format(err))
caller.msg("Deletion {}.".format(
'successful' if success else 'failed (does the prototype exist?)'))
return
else:
caller.msg("Could not find prototype '{}'".format(key))
if 'update' in self.switches:
# update existing prototypes
key = self.args.strip().lower()
existing_objects = protlib.search_objects_with_prototype(key)
if existing_objects:
n_existing = len(existing_objects)
slow = " (note that this may be slow)" if n_existing > 10 else ""
string = ("There are {} objects already created with an older version "
"of prototype {}. Should it be re-applied to them{}? [Y]/N".format(
n_existing, key, slow))
answer = yield(string)
if answer.lower() in ["n", "no"]:
caller.msg("|rUpdate cancelled.")
return
                n_updated = spawner.batch_update_objects_with_prototype(existing_objects, key)
                caller.msg("{} objects were updated.".format(n_updated))
            return
# A direct creation of an object from a given prototype
prototype = _parse_prototype(
self.args, expect=dict if self.args.strip().startswith("{") else basestring)
if not prototype:
# this will only let through dicts or strings
return
key = '<unnamed>'
if isinstance(prototype, basestring):
# A prototype key we are looking to apply
key = prototype
prototypes = protlib.search_prototype(prototype)
nprots = len(prototypes)
if not prototypes:
caller.msg("No prototype named '%s'." % prototype)
return
elif nprots > 1:
caller.msg("Found {} prototypes matching '{}':\n {}".format(
nprots, prototype, ", ".join(proto.get('prototype_key', '')
for proto in prototypes)))
return
# we have a prototype, check access
prototype = prototypes[0]
if not caller.locks.check_lockstring(
caller, prototype.get('prototype_locks', ''), access_type='spawn', default=True):
caller.msg("You don't have access to use this prototype.")
return
if "noloc" not in self.switches and "location" not in prototype:
prototype["location"] = self.caller.location
# proceed to spawning
try:
for obj in spawner.spawn(prototype):
self.caller.msg("Spawned %s." % obj.get_display_name(self.caller))
except RuntimeError as err:
caller.msg(err)
|
the-stack_106_28199 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 15:09:18 2021
@author: alef
"""
"""
For: aceita sequencias estáticas e iteradores.
Iteradores acessam elementos de forma sequencial
Com o for a referência aponta para cada elemento a cada iteração.
o break pode ser usado para interromper o laço
o continue passa direto para a proxima iteração
sintaxe
for <referência> in <sequencia>:
<bloco de código>
continue
break
else:
# este codigo é executado ao final do laço, a não ser que o laço tenha
# sido interrompido por break
<bloco de código>
"""
# sum of 1 to 99
s = 0
for x in range(1,100):
s = s + x
print(s) |
the-stack_106_28201 | import networks
class VrtNu(networks.ClosedTVNetwork):
def __init__(self, apikey, *args, **kwargs):
self.apikey = apikey
super(VrtNu, self).__init__(*args, **kwargs)
def login(self):
payload = {"loginID": self.username, "password": self.password,
"ApiKey": self.apikey, "authMode": "cookie", "includeSSOToken": "true", "targetEnv": "jssdk",
"sessionExpiration": -2}
resp = self.session.post(self.login_url, data=payload)
json_data = resp.json()
token = json_data['sessionInfo']['login_token']
pl2 = {"uid": json_data["UID"], "uidsig": json_data["UIDSignature"], "ts": json_data["signatureTimestamp"],
"fn": json_data["profile"]['firstName'], "ln": json_data["profile"]['lastName'],
"email": json_data["profile"]['email']}
cookie = {"glt_" + self.apikey: token}
headers = {"Origin": 'https://www.vrt.be',
"Referer": "https://www.vrt.be/vrtnu/"}
resp2 = self.session.post("https://token.vrt.be", cookies=cookie, json=pl2, headers=headers)
if resp2.status_code == 200:
self.logged_in = True
else:
self.logged_in = False
return self.logged_in
def download_show(self, url):
if not self.logged_in:
self.login()
req = self.session.get(url[:-1]+".securevideo.json")
json_resp = req.json()
data = next(iter(json_resp.values()))
req = self.session.get(self.url.format(**{'dl_id': data['mzid']}))
reply = req.json()
targets = reply['targetUrls']
if reply['subtitleUrls']:
r = self.session.get(reply['subtitleUrls'][0]['url'])
if r.status_code == 200:
with open('video_sub.vtt', 'wb') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
for target in targets:
if target['type'] == 'HLS':
super().download_show_all(url=target['url'])
def get_show_url(self, name):
raise NotImplementedError
def init(config):
return VrtNu(config["vrt"]['login']['apikey'], "vrt", config["vrt"]["download_url"], config["vrt"]['overview_url'],
config["vrt"]['login']['url'], config["vrt"]['login']['username'],
config["vrt"]['login']['password'])
|
the-stack_106_28203 | #!/usr/bin/env python3.7
import os
import subprocess
import re
import argparse
import sys
import glob
from Bio import SeqIO
def virus_pred(assembly_file, output_dir, virome_dataset, prok_mode):
"""Creates fasta file with viral contigs and putative prophages predicted with VirFinder_Euk_Mod and Virsorter"""
#VirFinder run using the VF.modEPV_k8.rda prediction model for prokaryotic and eukaryotic viruses
#The output of the script is a file recording the prediction results for all contigs and another file
#that only includes those results for which fdr < 0.1
#Input file must contain the dataset ID followed by _ at the beginning of the file name
#Contigs IDs must be comprised of the dataset ID and a sequential number separated by _
#if prok_mode:
# print("Running VirFinder only for prokaryotic viruses")
# subprocess.call("VirFinder_analysis_Prok.R -f %s -o %s" % (assembly_file, output_dir), stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL, shell = True)
#else:
# print("Running VirFinder for prokaryotic and eukaryotic viruses")
# subprocess.call("VirFinder_analysis_Euk.R -f %s -o %s" % (assembly_file, output_dir), stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL, shell = True)
#Path to directory containing VirSorter's databases
virsorter_data_path = "/hps/nobackup2/production/metagenomics/garp/databases/virsorter-data"
    #Run VirSorter using the RefSeq + Virome database and the decontamination mode for virus-enriched samples
    #if virome_dataset is True, otherwise run VirSorter without the decontamination mode
dataset_id = os.path.basename(assembly_file).split("_")[0]
virsorter_dir = os.path.join(output_dir, "%s_virsorter" % dataset_id)
os.mkdir(virsorter_dir)
if virome_dataset:
print("Running VirSorter with virome decontamination mode")
subprocess.call("wrapper_phage_contigs_sorter_iPlant.pl -f %s --db 2 --wdir %s --virome --data-dir %s" % (assembly_file, virsorter_dir, virsorter_data_path), stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL, shell = True)
else:
print("Running VirSorter without virome decontamination mode")
subprocess.call("wrapper_phage_contigs_sorter_iPlant.pl -f %s --db 2 --wdir %s --data-dir %s" % (assembly_file, virsorter_dir, virsorter_data_path), stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL, shell = True)
#viral_contigs_ids = []
#viral_contigs_desc = []
#record_list = []
#prophage_id = []
#Save ids and descriptions of contigs predicted as viral by VirFinder (fdr < 0.1)
#VirFinder_file = os.path.join(output_dir, re.search(r"%s\w+table-sig\.tab" % dataset_id, ",".join(os.listdir(output_dir))).group(0))
#with open(VirFinder_file) as input_file:
# if len(input_file.readlines()) > 1:
# input_file.seek(0)
# input_file.readline()
# for line in input_file:
# if line.split(" ")[0] not in viral_contigs_ids:
# viral_contigs_ids.append(line.split(" ")[0])
# viral_contigs_desc.append("%s_VirFinder" % re.search(r"length_\d+", line).group(0))
#VirSorter_viral_list = [x for x in glob.glob(os.path.join(virsorter_dir, "Predicted_viral_sequences", "*")) if re.search(r"cat-[12]\.fasta", x)]
#VirSorter_prophage_list = [x for x in glob.glob(os.path.join(virsorter_dir, "Predicted_viral_sequences", "*")) if re.search(r"cat-[45]\.fasta", x)]
#Save ids of contigs predicted as viral by VirSorter and that are different from those reported by VirFinder
#for item in VirSorter_viral_list:
# if os.stat(item).st_size != 0:
# with open(item) as input_file:
# id_search = re.compile(r"%s_\d+" % dataset_id)
# prop_search = re.compile(r"(length_\d+)[a-z0-9_-]+(cat_\d)")
# for line in input_file:
# if id_search.search(line) and id_search.search(line).group(0) not in viral_contigs_ids:
# viral_contigs_ids.append(id_search.search(line).group(0))
# viral_contigs_desc.append("%s_VirSorter_%s" % (prop_search.search(line).group(1), prop_search.search(line).group(2)))
# elif id_search.search(line):
# target_index = viral_contigs_ids.index(id_search.search(line).group(0))
# viral_contigs_desc[target_index] = viral_contigs_desc[target_index] + "_VirSorter_%s" % re.search(r"cat_\d$", line).group(0)
#Save ids and SeqRecord objects of putative prophages identified by VirSorter
#for item in VirSorter_prophage_list:
# if os.stat(item).st_size != 0:
# for record in SeqIO.parse(item, "fasta"):
# record.id = id_search.search(record.id).group(0)
# record.description = "%s_VirSorter_%s" % (prop_search.search(record.description).group(1), prop_search.search(record.description).group(2))
# prophage_id.append(record.id)
# record_list.append(record)
#Add Pro suffix to ids of prophage SeqRecords and a sequential number if more than one prophage were predicted in the same contig
#if len(record_list) > 0:
# indices = []
# for i,j in enumerate(record_list):
# if prophage_id.count(j.id) > 1 and i not in indices:
# num = 1
# j.id = j.id + "-Pro-%s" % num
# num += 1
# for x in range(i+1, len(record_list)):
# if record_list[x].id == re.split("-", j.id)[0]:
# record_list[x].id = record_list[x].id + "-Pro-%s" % num
# num += 1
# indices.append(x)
# elif i not in indices:
# j.id = j.id + "-Pro"
#Retrieve SeqRecord objects of viral contigs predicted by VirFinder and VirSorter
#if len(viral_contigs_ids) > 0:
# for position, contig in enumerate(viral_contigs_ids):
# for record in SeqIO.parse(assembly_file, "fasta"):
# if record.id == contig:
# record.description = viral_contigs_desc[position]
# record_list.append(record)
# break
#return record_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Write fasta file with predicted viral contigs and putative prophages")
parser.add_argument("-f", "--fasta", dest = "input_file", help = "Assembly fasta file", required = True)
parser.add_argument("-o", "--outdir", dest = "outdir", help = "output directory", default = ".")
parser.add_argument("--virome", dest = "virome", action = "store_true", help = "Indicate whether you are processing a metagenomic or viromic dataset")
parser.add_argument("--prok", dest = "mode", action = "store_true", help = "Indicate whether you would like the pipeline to focus only on prokaryotic viruses")
if len(sys.argv) == 1:
parser.print_help()
else:
args = parser.parse_args()
input_file = args.input_file
#dataset_id = os.path.basename(input_file).split("_")[0]
#viral_sequences = virus_pred(input_file, args.outdir, args.virome, args.mode)
#SeqIO.write(viral_sequences, os.path.join(args.outdir, "%s_viral_sequences.fna" % dataset_id), "fasta")
virus_pred(input_file, args.outdir, args.virome, args.mode)
|
the-stack_106_28204 | import os
import sys
import torch
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.getcwd(), 'generative_inpainting'))
import argparse
from exp.loaddata_utils import ImageNetLoadClass
from exp.general_utils import Timer
import numpy as np
import os
from arch.sensitivity.BDNet import OppositeGernarativeL1BDNet, GernarativeL1BDNet
import exp.utils_visualise as utils_visualise
from exp.utils_flipping import get_logodds_by_flipping
from arch.sensitivity.BBMPNet import BBMP_SDR_Generative, BBMP_SSR_Generative
import torch.nn.functional as F
import tensorflow as tf
import visdom
import utils_model
def main(args, importance_func, impant_model, interpret_net):
if args.cuda:
interpret_net.cuda()
impant_model.cuda()
# Load data
load_helper = ImageNetLoadClass(imagenet_folder=args.data_dir,
dataset=args.dataset)
for img_idx in range(args.image_offset, args.image_offset + args.num_imgs):
print('img_idx:', img_idx)
img_loader, img_filename, gt_class_name, pred_class_name, classifier_output = \
load_helper.get_imgnet_one_image_loader(trained_classifier=interpret_net,
img_index=img_idx,
batch_size=args.batch_size,
target_label='gt',
cuda=args.cuda)
output_file = '%s/%s_records.th' % (args.save_dir, img_filename)
if not args.overwrite and os.path.exists(output_file):
print('Already evaluate for the img idx %d' % img_idx)
continue
# Take out one mnist image and unnormalize it
images, targets = next(iter(img_loader))
orig_img = images[0:1, ...]
unnormalized_img = load_helper.unnormalize_imagenet_img(orig_img)[0, ...]
if args.visdom_enabled:
visdom.Visdom().image(load_helper.unnormalize_imagenet_img(img_loader[0][0][0]))
with Timer('evaluating image'):
impant_model.reset()
imp_vector = importance_func(interpret_net, impant_model, img_loader)
overlayed_img, clim = utils_visualise.get_overlayed_image(unnormalized_img, imp_vector)
if args.visdom_enabled:
visdom.Visdom().image(overlayed_img)
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
# Visualize images
# file_name = '%s/%s_overlayed.png' % (args.save_dir, img_filename)
# utils_visualise.plot_orig_and_overlay_img(unnormalized_img, overlayed_img,
# bbox_coord=coord_arr[0][1],
# file_name=file_name,
# gt_class_name=gt_class_name,
# pred_class_name=pred_class_name,
# clim=clim,
# visualize=args.visualize)
torch.save({
'unnormalized_img': unnormalized_img,
'imp_vector': imp_vector,
'img_idx': img_idx,
'classifier_output': classifier_output,
'gnd_truth_label': targets[0],
}, output_file)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Imagenet Example')
parser.add_argument('--importance-method', type=str, default='vbd_sdr',
help='choose from ["p_b", "vbd_ssr", "vbd_sdr", "bbmp_ssr", "bbmp_sdr"]')
parser.add_argument('--classifier', type=str, default='alexnet',
help='Choose from [alexnet, resnet18, vgg19_bn]')
parser.add_argument('--window', type=int, default=1)
parser.add_argument('--num-samples', type=int, default=1)
parser.add_argument('--prior', type=float, default=0.5,
help='prior probability for reg loss')
parser.add_argument('--batch-size', type=int, default=4, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--ard_init', type=float, default=0.,
help='ARD initialization')
parser.add_argument('--reg-coef', type=float, default=0.01,
help='regularization coefficient')
parser.add_argument('--tv_coef', type=float, default=0.,
help='regularization coefficient')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--epoch-print', type=int, default=1,
help='how many epochs to wait before logging training status')
parser.add_argument('--data-dir', type=str, default='../imagenet/',
help='data directory')
parser.add_argument('--gen-model-path', type=str, default=None, help='data directory')
parser.add_argument('--gen-model-name', type=str,
default='ImpantingModel',
help='choose from [ImpantingModel, VAEImpantModel, VAEWithVarImpantModel,'
'VAEWithVarImpantModelMean, MeanInpainter, LocalMeanInpainter]')
parser.add_argument('--dataset', type=str, default='valid/',
help='Choose from train/ or valid/')
parser.add_argument('--save-dir', type=str, default='./imgs/hole_model_0.01/',
help='Save directory')
parser.add_argument('--save-tag', type=str, default='',
help='Unique tag for output images')
parser.add_argument('--verbose', type=int, default=1,
help='Open verbose or not')
parser.add_argument('--num-imgs', type=int, default=1,
help='number of images to produce')
parser.add_argument('--gpu-ids', nargs='+', type=int, default=[],
help='number of gpus to produce')
parser.add_argument('--visualize', action='store_true')
parser.add_argument('--image-offset', type=int, default=0, help='offset for index of image')
parser.add_argument('--bvlc_dir', type=str, default='nets/BVLC/',
help='bvlr directory')
parser.add_argument('--gan_g_dir', type=str, default='nets/GAN/',
help='gan generator directory')
parser.add_argument('--eval-samples', type=int, default=1,
help='number of samples in evaluation')
parser.add_argument('--dropout_param_size', nargs='+', type=int, default=[56, 56],
help='Dropout parameter size')
parser.add_argument('--rw_max', type=int, default=30, help='annealing')
parser.add_argument('--visdom_enabled', action='store_true', default=False)
parser.add_argument('--overwrite', action='store_true', default=False)
args = parser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
tf.set_random_seed(args.seed)
# If use bbmp, batch size needs to be 1
if args.importance_method.startswith('bbmp'):
args.batch_size = 1
args.cuda = (not args.no_cuda) and torch.cuda.is_available()
if args.cuda and torch.cuda.current_device() != args.gpu_ids[0]:
torch.cuda.set_device(args.gpu_ids[0])
print('args:', args)
print('==================== Start =====================')
print('')
return args
def log_sum_exp(x, dim):
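    # Numerically stable log-sum-exp: subtract the max before exponentiating
    # so exp() cannot overflow, then add it back after the log.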
x_max = x.max()
return torch.log(torch.sum(torch.exp(x - x_max), dim=dim)) + x_max
def log_odds_loss(outputs, targets):
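    # Negative log-odds of the target class: -(log p(target) - logsumexp of
    # the log-probabilities of all other classes).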
log_prob = F.log_softmax(outputs, dim=1)
if targets.data[0] == 0:
other_log_prob = log_prob[:, (targets.data[0] + 1):]
elif targets.data[0] == log_prob.size(1) - 1:
other_log_prob = log_prob[:, :targets.data[0]]
else:
other_log_prob = torch.cat([log_prob[:, :targets.data[0]],
log_prob[:, (targets.data[0] + 1):]], dim=1)
tmp = log_sum_exp(other_log_prob, dim=1)
return -(log_prob[:, targets.data[0]] - tmp).mean()
if __name__ == '__main__':
args = parse_args()
def vbd_ssr(interpret_net, impant_model, img_loader):
bdnet = GernarativeL1BDNet
return _vbd_shared(bdnet, interpret_net, impant_model, img_loader)
def vbd_sdr(interpret_net, impant_model, img_loader):
bdnet = OppositeGernarativeL1BDNet
color_vector = _vbd_shared(bdnet, interpret_net, impant_model, img_loader)
return -color_vector
def _vbd_shared(bdnet, interpret_net, impant_model, img_loader):
net = bdnet(dropout_param_size=(1, args.dropout_param_size[0], args.dropout_param_size[1]),
trained_classifier=interpret_net, generative_model=impant_model,
loss_criteria=log_odds_loss,
ard_init=args.ard_init, lr=args.lr, reg_coef=args.reg_coef,
tv_coef=args.tv_coef, rw_max=args.rw_max,
cuda_enabled=args.cuda, verbose=args.verbose, prior_p=args.prior,
upsample_to_size=(224, 224), visdom_enabled=args.visdom_enabled)
# Train it
net.fit(img_loader, epochs=args.epochs, epoch_print=args.epoch_print)
# Visualize the keep probability
keep_probability = net.get_importance_vector()
print('range: (%.3f, %.3f), shape: %s' % (keep_probability.min(), keep_probability.max(),
str(keep_probability.size())))
color_vector = (keep_probability - 0.5).cpu().numpy()
# sample_berns = net.sampled_from_logit_p(args.eval_samples)
return color_vector
def bbmp_ssr(interpret_net, impant_model, img_loader):
bbmpnet = BBMP_SSR_Generative
return _bbmp_common(bbmpnet, interpret_net, impant_model, img_loader)
def bbmp_sdr(interpret_net, impant_model, img_loader):
bbmpnet = BBMP_SDR_Generative
return _bbmp_common(bbmpnet, interpret_net, impant_model, img_loader)
def _bbmp_common(bbmpnet, interpret_net, impant_model, img_loader):
imgs, targets = next(iter(img_loader))
new_loader = [(imgs[0:1, ...], targets[0:1])]
net = bbmpnet(dropout_param_size=(1, args.dropout_param_size[0], args.dropout_param_size[1]),
trained_classifier=interpret_net, generative_model=impant_model, loss_criteria=log_odds_loss,
ard_init=1., lr=args.lr, reg_coef=args.reg_coef, tv_coef=args.tv_coef, rw_max=1,
cuda_enabled=args.cuda, verbose=args.verbose,
upsample_to_size=(224, 224), visdom_enabled=args.visdom_enabled)
net.fit(new_loader, epochs=args.epochs, epoch_print=args.epoch_print)
# Visualize by mask
keep_probability = net.get_importance_vector()
print('range: (%.3f, %.3f), shape: %s' % (keep_probability.min(), keep_probability.max(),
str(keep_probability.shape)))
color_vector = (keep_probability - 0.5).cpu().numpy()
return color_vector
def p_b(interpret_net, impant_model, img_loader):
        # Make sure the classifier is in eval mode.
        interpret_net.eval()
        # Take out the original imgs and targets
imgs, targets = next(iter(img_loader))
orig_img = imgs[0:1, ...]
orig_target = targets[0]
# All the inputs dimension
N, channel, dim1, dim2 = imgs.size()
width = dim2 - args.window + 1
height = dim1 - args.window + 1
# Mask generator
def mask_generator():
for i in range(height):
for j in range(width):
mask = torch.ones(1, 1, dim1, dim2)
mask[:, :, i:(i + args.window), j:(j + args.window)] = 0.
yield mask
orig_odds, all_log_odds = get_logodds_by_flipping(
mask_generator(), interpret_net, impant_model, img_loader,
batch_size=args.batch_size, num_samples=args.num_samples, window=args.window,
cuda_enabled=args.cuda)
perturb_rank = np.zeros((dim1, dim2))
count = np.zeros((dim1, dim2))
for i in range(height):
for j in range(width):
perturb_rank[i:(i + args.window), j:(j + args.window)] \
+= (orig_odds - all_log_odds[i * width + j])
count[i:(i + args.window), j:(j + args.window)] += 1
return perturb_rank / count
# Load which method to interpret importance
importance_func = eval(args.importance_method)
# Load which impanting model you want to use
impant_model = utils_model.get_impant_model(args.gen_model_name, args.batch_size, args.gen_model_path)
interpret_net = utils_model.get_pretrained_classifier(args.classifier)
main(args, importance_func, impant_model, interpret_net)
|
the-stack_106_28206 | import csv
from photo_radar import models
from photo_radar.fetchers import canada
def save_as_csv():
with open("photo_radar.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(
[
"id",
"street",
"city",
"provience",
"country",
"lat",
"lon",
"is_red_light_enabled",
"is_speed_enabled",
]
)
for camera in models.Camera.select():
writer.writerow(
[
camera.id,
camera.street,
camera.city,
camera.provience,
camera.country,
camera.lat,
camera.lon,
camera.is_red_light_enabled,
camera.is_speed_enabled,
]
)
if __name__ == "__main__":
models.create_tables()
canada.bc.run()
save_as_csv()
|
the-stack_106_28209 | import abc
from logging import getLogger
from os import path
import pandas as pd
from clinica.utils.inputs import check_caps_folder
logger = getLogger("clinicadl")
class SplitManager:
def __init__(
self,
caps_directory,
tsv_path,
diagnoses,
baseline=False,
multi_cohort=False,
split_list=None,
):
self._check_tsv_path(tsv_path, multi_cohort)
self.tsv_path = tsv_path
self.caps_dict = self._create_caps_dict(caps_directory, multi_cohort)
self.multi_cohort = multi_cohort
self.diagnoses = diagnoses
self.baseline = baseline
self.split_list = split_list
@abc.abstractmethod
def max_length(self) -> int:
"""Maximum number of splits"""
pass
@abc.abstractmethod
def __len__(self):
pass
@property
@abc.abstractmethod
def allowed_splits_list(self):
"""
List of possible splits if no restriction was applied
Returns:
list[int]: list of all possible splits
"""
pass
def __getitem__(self, item):
"""
Returns a dictionary of DataFrames with train and validation data.
Args:
item (int): Index of the split wanted.
Returns:
Dict[str:pd.DataFrame]: dictionary with two keys (train and validation).
"""
self._check_item(item)
if self.multi_cohort:
tsv_df = pd.read_csv(self.tsv_path, sep="\t")
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
found_diagnoses = set()
for idx in range(len(tsv_df)):
cohort_name = tsv_df.loc[idx, "cohort"]
cohort_path = tsv_df.loc[idx, "path"]
cohort_diagnoses = (
tsv_df.loc[idx, "diagnoses"].replace(" ", "").split(",")
)
if bool(set(cohort_diagnoses) & set(self.diagnoses)):
target_diagnoses = list(set(cohort_diagnoses) & set(self.diagnoses))
cohort_train_df, cohort_valid_df = self.concatenate_diagnoses(
item, cohort_path=cohort_path, cohort_diagnoses=target_diagnoses
)
cohort_train_df["cohort"] = cohort_name
cohort_valid_df["cohort"] = cohort_name
train_df = pd.concat([train_df, cohort_train_df])
valid_df = pd.concat([valid_df, cohort_valid_df])
found_diagnoses = found_diagnoses | (
set(cohort_diagnoses) & set(self.diagnoses)
)
if found_diagnoses != set(self.diagnoses):
raise ValueError(
f"The diagnoses found in the multi cohort dataset {found_diagnoses} "
f"do not correspond to the diagnoses wanted {set(self.diagnoses)}."
)
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
else:
train_df, valid_df = self.concatenate_diagnoses(item)
train_df["cohort"] = "single"
valid_df["cohort"] = "single"
return {
"train": train_df,
"validation": valid_df,
}
def concatenate_diagnoses(self, split, cohort_path=None, cohort_diagnoses=None):
"""Concatenated the diagnoses needed to form the train and validation sets."""
train_df, valid_df = pd.DataFrame(), pd.DataFrame()
train_path, valid_path = self._get_tsv_paths(
split=split,
cohort_path=cohort_path if cohort_path is not None else self.tsv_path,
)
logger.debug(f"Training data loaded at {train_path}")
logger.debug(f"Validation data loaded at {valid_path}")
if cohort_diagnoses is None:
cohort_diagnoses = self.diagnoses
for diagnosis in cohort_diagnoses:
if self.baseline:
train_diagnosis_path = path.join(
train_path, diagnosis + "_baseline.tsv"
)
else:
train_diagnosis_path = path.join(train_path, diagnosis + ".tsv")
valid_diagnosis_path = path.join(valid_path, diagnosis + "_baseline.tsv")
train_diagnosis_df = pd.read_csv(train_diagnosis_path, sep="\t")
valid_diagnosis_df = pd.read_csv(valid_diagnosis_path, sep="\t")
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = pd.concat([valid_df, valid_diagnosis_df])
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
return train_df, valid_df
@abc.abstractmethod
def _get_tsv_paths(self, cohort_path, split):
"""
Computes the paths to the TSV files needed depending on the split structure.
Args:
cohort_path (str): path to the split structure of a cohort.
split (int): Index of the split.
Returns:
train_path (str): path to the directory containing training data.
valid_path (str): path to the directory containing validation data.
"""
pass
@abc.abstractmethod
def split_iterator(self):
"""Returns an iterable to iterate on all splits wanted."""
pass
def _check_item(self, item):
if item not in self.allowed_splits_list:
raise ValueError(
f"Split index {item} out of allowed splits {self.allowed_splits_list}."
)
@staticmethod
def _create_caps_dict(caps_directory, multi_cohort):
if multi_cohort:
if not caps_directory.endswith(".tsv"):
raise ValueError(
"If multi_cohort is given, the caps_dir argument should be a path to a TSV file."
)
else:
caps_df = pd.read_csv(caps_directory, sep="\t")
SplitManager._check_multi_cohort_tsv(caps_df, "CAPS")
caps_dict = dict()
for idx in range(len(caps_df)):
cohort = caps_df.loc[idx, "cohort"]
caps_path = caps_df.loc[idx, "path"]
check_caps_folder(caps_path)
caps_dict[cohort] = caps_path
else:
check_caps_folder(caps_directory)
caps_dict = {"single": caps_directory}
return caps_dict
@staticmethod
def _check_tsv_path(tsv_path, multi_cohort):
if multi_cohort:
if not tsv_path.endswith(".tsv"):
raise ValueError(
"If multi_cohort is given, the tsv_path argument should be a path to a TSV file."
)
else:
tsv_df = pd.read_csv(tsv_path, sep="\t")
SplitManager._check_multi_cohort_tsv(tsv_df, "labels")
else:
if tsv_path.endswith(".tsv"):
raise ValueError(
f"You gave the path to a TSV file in tsv_path {tsv_path}. "
f"To use multi-cohort framework, please add --multi_cohort flag."
)
@staticmethod
def _check_multi_cohort_tsv(tsv_df, purpose):
if purpose.upper() == "CAPS":
mandatory_col = {"cohort", "path"}
else:
mandatory_col = {"cohort", "path", "diagnoses"}
if not mandatory_col.issubset(tsv_df.columns.values):
raise ValueError(
f"Columns of the TSV file used for {purpose} location must include {mandatory_col}."
)
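# Example of the multi-cohort TSV layout checked above (hypothetical paths);
# the CAPS variant only requires the 'cohort' and 'path' columns:
#
#   cohort  path                        diagnoses
#   ADNI    /data/ADNI/labels_split     AD,CN
#   AIBL    /data/AIBL/labels_split     AD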
|
the-stack_106_28210 | """257. Binary Tree Paths
https://leetcode.com/problems/binary-tree-paths/
Given a binary tree, return all root-to-leaf paths.
Note: A leaf is a node with no children.
Example:
Input:
1
/ \
2 3
\
5
Output: ["1->2->5", "1->3"]
Explanation: All root-to-leaf paths are: 1->2->5, 1->3
"""
from typing import List
from common.tree_node import TreeNode
class Solution:
def binary_tree_paths(self, root: TreeNode) -> List[str]:
def dfs(node: TreeNode, path: str):
if not node.left and not node.right: # leaf node
ans.append(path)
return
if node.left:
dfs(node.left, path + '->' + str(node.left.val))
if node.right:
dfs(node.right, path + '->' + str(node.right.val))
if not root:
return []
ans = []
dfs(root, str(root.val))
return ans
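# Example run (a sketch; assumes common.tree_node.TreeNode stores a value and
# exposes left/right child attributes, as used above):
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   root.left.right = TreeNode(5)
#   Solution().binary_tree_paths(root)  # -> ['1->2->5', '1->3']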
|
the-stack_106_28212 | import time
from constants import *
# All selenium imports
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
# All the common utilities that will be used by web, desktop and mobile platforms.
def find_element(driver, timeout_seconds, mode, xpath):
if mode == "NAME":
return WebDriverWait(driver, timeout_seconds).until(
EC.visibility_of_element_located((By.NAME, xpath))
)
elif mode == "ID":
return WebDriverWait(driver, timeout_seconds).until(
EC.visibility_of_element_located((By.ID, xpath))
)
elif mode == "CLASS_NAME":
return WebDriverWait(driver, timeout_seconds).until(
EC.visibility_of_element_located((By.CLASS_NAME, xpath))
)
else:
return WebDriverWait(driver, timeout_seconds).until(
EC.visibility_of_element_located((By.XPATH, xpath))
)
def execute_action(driver, command, element):
if command[TYPE] == CLICK_ACTION:
ActionChains(driver).move_to_element(element).click().perform()
elif command[TYPE] == CLICK_IF_PRESENT_ACTION:
try:
ActionChains(driver).move_to_element(element).click().perform()
except Exception:
print("[LOG][Not Found Element] click if present : ", element)
elif command[TYPE] == HOVER_ACTION:
ActionChains(driver).move_to_element(element).perform()
elif command[TYPE] == TYPE_ACTION:
element.send_keys(command[ARGS][INPUT])
elif command[TYPE] == WAIT_UNTIL_ACTION:
pass
elif command[TYPE] == ASSERT_ACTION:
ActionChains(driver).move_to_element(element).perform()
else:
raise Exception("[Error] Command Type Not Found : ", command[TYPE])
def execute_non_element_action(driver, command):
# Here are all the commands that don't require finding an element.
if command[TYPE] == WAIT_ACTION:
time.sleep(int(command[ARGS][SUBJECT]))
return True
elif command[TYPE] == EXECJS_ACTION:
driver.execute_script(command[ARGS][SUBJECT])
return True
else:
return False
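# Example of how these helpers compose (a sketch; the command dict shape and
# the TYPE/ARGS/CLICK_ACTION names are the constants assumed above, and the
# element id is hypothetical):
#
#   command = {TYPE: CLICK_ACTION, ARGS: {}}
#   if not execute_non_element_action(driver, command):
#       element = find_element(driver, 10, "ID", "submit-button")
#       execute_action(driver, command, element)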
|
the-stack_106_28213 | from sqlpuzzle._common import check_type_decorator
from .queryparts import QueryPart
__all__ = ('Limit',)
class Limit(QueryPart):
def __init__(self, limit=None, offset=None):
super().__init__()
self._limit = limit
self._offset = offset
def __str__(self):
res = ''
if self._limit is not None:
res += 'LIMIT {}'.format(self._limit)
if self._offset is not None:
if res:
res += ' '
res += 'OFFSET {}'.format(self._offset)
return res
def __eq__(self, other):
return (
type(self) == type(other) and
self._limit == other._limit and
self._offset == other._offset
)
@property
def is_set(self):
return self._limit is not None or self._offset is not None
@check_type_decorator((type(None), int))
def limit(self, limit, offset=None):
if limit is None:
self._limit = None
self._offset = None
else:
self._limit = int(limit)
if offset is not None:
self.offset(offset)
return self
@check_type_decorator((type(None), int))
def offset(self, offset):
self._offset = int(offset) if offset else None
return self
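# Example of how this part renders on its own (a small sketch):
#
#   lim = Limit()
#   lim.limit(10, offset=20)
#   str(lim)         # -> 'LIMIT 10 OFFSET 20'
#   lim.limit(None)  # clears both the limit and the offset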
|
the-stack_106_28214 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
import inspect
import os
import tempfile
from absl.testing import parameterized
import six
import tensorflow.compat.v1 as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
name_parts = six.ensure_str(name).split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
def get_args(symbol):
if hasattr(inspect, "signature"):
signature = inspect.signature(symbol)
# Ignore *args and **kwargs for now.
return [param.name for param in signature.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
return tf_inspect.getargspec(symbol)[0]
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
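  For example, `get_func_and_args_from_str("tf.foo(arg1=val1, arg2=val2)")`
  returns `("tf.foo", ["arg1", "arg2"])`.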
"""
open_paren_index = six.ensure_str(call_str).find("(")
close_paren_index = call_str.rfind(")")
  function_name = call_str[:open_paren_index]
args = six.ensure_str(call_str[open_paren_index +
1:close_paren_index]).split(",")
args = [six.ensure_str(arg).split("=")[0].strip() for arg in args]
args = [arg for arg in args if arg] # filter out empty strings
return function_name, args
class TestUpgrade(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
super(TestUpgrade, cls).setUpClass()
cls.v2_symbols = {}
cls.v1_symbols = {}
if hasattr(tf.compat, "v2"):
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + six.ensure_str(name)] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v2, visitor)
if hasattr(tf.compat, "v1"):
def symbol_collector_v1(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
for name in api_names_v1:
cls.v1_symbols["tf." + six.ensure_str(name)] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector_v1)
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def _upgrade(self,
old_file_text,
import_rename=False,
upgrade_compat_v1_import=False):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(
tf_upgrade_v2.TFAPIChangeSpec(
import_rename, upgrade_compat_v1_import=upgrade_compat_v1_import))
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def _upgrade_multiple(self, old_file_texts):
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
results = []
for old_file_text in old_file_texts:
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
results.append([count, report, errors, out_file.getvalue()])
return results
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertNotEqual(six.ensure_str(report).find("Failed to parse"), -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
    # This is not a complete test, but it is a sanity check that report
    # generation produces useful information.
self.assertTrue(
six.ensure_str(report).find("Renamed function `tf.angle` to "
"`tf.math.angle`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + six.ensure_str(name))
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
text not in self.v2_symbols and
# Ignore any symbol that contains __internal__
"__internal__" not in text and
# Builds currently install old version of estimator that doesn't
# have some 2.0 symbols.
not text.startswith("tf.estimator")):
self.assertFalse(
True, "Symbol %s generated from %s not in v2 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + six.ensure_str(name))
else:
_, _, _, text = self._upgrade("tf." + six.ensure_str(name))
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
not text.startswith("tf.estimator") and
text not in v1_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v1 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testV1KeywordArgNames(self):
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that verifies V1 argument names.
def arg_test_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = tf_export.get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
visitor = public_api.PublicAPIVisitor(arg_test_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testV2KeywordArgNames(self):
# This test converts a call of the form:
# tf.foo(arg1=0, arg2=1, ...)
# to 2.0. Then, checks that converted function has valid argument names.
if not hasattr(tf.compat, "v2"):
return
v2_arg_exceptions = {
"verify_shape_is_now_always_true",
# These arguments should not be used, they just specify
# that a function takes named arguments.
"keyword_required",
"_sentinel",
}
v1_name_exceptions = {
"tf.print", # requires print_function import
}
function_warnings = (
tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
function_transformers = (
tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that converts to V2 and checks V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
if not tf_inspect.isfunction(attr):
continue
names_v1 = tf_export.get_v1_names(attr)
arg_names_v1 = get_args(attr)
for name in names_v1:
tf_name = "tf.%s" % name
if tf_name in function_warnings or tf_name in function_transformers:
continue # These require manual change
if tf_name in v1_name_exceptions:
continue
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(arg_names_v1)])
text_input = "%s(%s)" % (tf_name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
if "__internal__" in new_function_name:
# Skip the tf.__internal__ and tf.keras.__internal__ API.
continue
if new_function_name == "tf.compat.v1.%s" % name:
if tf_name in keyword_renames:
# If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.fail(
"Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
(new_function_name, text_input, text))
continue
if new_function_name.startswith("tf.compat.v2"):
self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),
self.v2_symbols)
continue
# 3. Verify V2 function and arguments.
args_v2 = get_args(self.v2_symbols[new_function_name])
args_v2.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v2,
"Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v2)))
# 4. Verify that the argument exists in v1 as well.
if new_function_name in set(["tf.nn.ctc_loss",
"tf.saved_model.save"]):
continue
args_v1 = get_args(self.v1_symbols[new_function_name])
args_v1.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v1,
"Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v1)))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testPositionsMatchArgGiven(self):
full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings
method_names = list(full_dict.keys())
for method_name in method_names:
args = list(full_dict[method_name].keys())
if "contrib" in method_name:
# Skip descending and fetching contrib methods during test. These are
# not available in the repo anymore.
continue
elif six.ensure_str(method_name).startswith("*."):
# special case for optimizer methods
method = six.ensure_str(method_name).replace("*", "tf.train.Optimizer")
else:
method = method_name
method = get_symbol_for_name(tf, method)
arg_spec = tf_inspect.getfullargspec(method)
for (arg, pos) in args:
# to deal with the self argument on methods on objects
if six.ensure_str(method_name).startswith("*."):
pos += 1
self.assertEqual(arg_spec[0][pos], arg)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
manual_function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
if name in manual_function_reorders:
continue
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = tf_export.get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay",
"tf.train.piecewise_constant_decay",
]:
text = "%s(a, b)\n" % decay
_, report, unused_errors, _ = self._upgrade(text)
self.assertIn("switch to the schedules in "
"`tf.keras.optimizers.schedules`", report)
def verify_compat_v1_rename_correctness(self, values, ns_prefix=""):
if ns_prefix:
ns_prefix += "."
for v in values:
text = "tf." + ns_prefix + v + "(a, b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
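
  # The initializer aliases below are expected to be rewritten to their
  # tf.compat.v1 counterparts; verify_compat_v1_rename_correctness above
  # performs the per-symbol check.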
def testInitializers(self):
initializers = [
"zeros",
"ones",
"constant",
"random_uniform",
"random_normal",
"truncated_normal",
"variance_scaling",
"orthogonal",
"glorot_uniform",
"glorot_normal",
"identity",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="initializers")
initializers = [
"zeros_initializer",
"ones_initializer",
"constant_initializer",
"random_uniform_initializer",
"random_normal_initializer",
"truncated_normal_initializer",
"variance_scaling_initializer",
"orthogonal_initializer",
"glorot_uniform_initializer",
"glorot_normal_initializer",
]
self.verify_compat_v1_rename_correctness(initializers)
initializers = [
"zeros",
"ones",
"Ones",
"Zeros",
"constant",
"Constant",
"VarianceScaling",
"Orthogonal",
"orthogonal",
"Identity",
"identity",
"glorot_uniform",
"glorot_normal",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
"TruncatedNormal",
"truncated_normal",
"RandomUniform",
"uniform",
"random_uniform",
"RandomNormal",
"normal",
"random_normal",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="keras.initializers")
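
  # contrib/slim xavier initializers are rewritten to
  # tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg",
  # ...), with the `uniform` flag folded into the distribution argument.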
def testContribXavierInitializer(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
text = contrib_alias + "layers.xavier_initializer()\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=\"uniform\")\n",
)
text = "slim.xavier_initializer(True or False)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "slim.xavier_initializer(uniform=(True or False))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = contrib_alias + "layers.xavier_initializer_conv2d(False, 12)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12)\n",
)
text = (contrib_alias + "layers.xavier_initializer_conv2d("
"False, 12, tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtype=tf.float32)\n",
)
text = (contrib_alias + "layers.xavier_initializer("
"False, 12, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtypes=tf.float32)\n",
)
def testVarianceScalingInitializer(self):
text = ("tf.contrib.layers.variance_scaling_initializer("
"mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = ("slim.variance_scaling_initializer("
"uniform=(True or False), mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"distribution=(\"uniform\" if True or False else \"truncated_normal\"),"
" mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = "tf.contrib.layers.variance_scaling_initializer(factor=1.0)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0)\n",
)
text = ("tf.contrib.layers.variance_scaling_initializer("
"12.0, \"FAN_AVG\", True, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(12.0, "
"(\"FAN_AVG\").lower(), "
"(\"uniform\" if True else \"truncated_normal\"), "
"dtypes=tf.float32)\n",
)
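
  # tf.metrics.* and tf.losses.* calls are prefixed with tf.compat.v1, and
  # the report should point users at the object-oriented replacements.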
def testMetrics(self):
metrics = [
"accuracy",
"auc",
"average_precision_at_k",
"false_negatives",
"false_negatives_at_thresholds",
"false_positives",
"false_positives_at_thresholds",
"mean",
"mean_absolute_error",
"mean_cosine_distance",
"mean_iou",
"mean_per_class_accuracy",
"mean_relative_error",
"mean_squared_error",
"mean_tensor",
"percentage_below",
"precision",
"precision_at_k",
"precision_at_thresholds",
"precision_at_top_k",
"recall",
"recall_at_k",
"recall_at_thresholds",
"recall_at_top_k",
"root_mean_squared_error",
"sensitivity_at_specificity",
"sparse_average_precision_at_k",
"sparse_precision_at_k",
"specificity_at_sensitivity",
"true_negatives",
"true_negatives_at_thresholds",
"true_positives",
"true_positives_at_thresholds",
]
for m in metrics:
text = "tf.metrics." + m + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)
self.assertIn(
"tf.metrics have been replaced with object oriented versions", report)
def testLosses(self):
losses = [
"absolute_difference",
"add_loss",
"compute_weighted_loss",
"cosine_distance",
"get_losses",
"get_regularization_loss",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"huber_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy",
]
for l in losses:
text = "tf.losses." + l + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)
self.assertIn(
"tf.losses have been replaced with object oriented versions", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "()"
expected_text = ns + "(loss_reduction=tf.keras.losses.Reduction.SUM)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ns + "(loss_reduction=TEST)"
expected_text = ns + "(loss_reduction=TEST)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"
expected_text = (
"tf.compat.v1.estimator.BaselineClassifier("
"model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "
"optimizer=o, config=c, loss_reduction=lr)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"
expected_text = ("tf.estimator.BaselineClassifier(" +
"model_dir=model_dir, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
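
  # Estimator constructors called with input_layer_partitioner and/or
  # optimizer arguments are redirected to tf.compat.v1.estimator.*; canned
  # estimators additionally gain an explicit
  # loss_reduction=tf.keras.losses.Reduction.SUM (the previous default).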
def testBaseEstimatorPartitioner(self):
classes = ["LinearEstimator", "DNNLinearCombinedEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitioner(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorOptimizer(self):
classes = ["BaselineEstimator", "LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorOptimizer(self):
classes = [
"BaselineClassifier", "BaselineRegressor", "LinearClassifier",
"LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
suffix = ("(optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
suffix = ("(dnn_optimizer=TEST, linear_optimizer=Test, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorPartitionerAndOptimizer(self):
classes = ["LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorPartitionerAndOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitionerAndOptimizer(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedPartitionerAndOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
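
  # tf.image.extract_glimpse: a literal uniform_noise=<expr> argument is
  # rewritten to noise='uniform' if (<expr>) else 'gaussian'; calls without
  # that argument are left untouched.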
def testExtractGlimpse(self):
text = ("tf.image.extract_glimpse(x, size, off, False, "
"False, False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, False, "
"False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, uniform_noise=True if uniform_noise else "
"False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, noise='uniform' if (True if uniform_noise else "
"False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" uniform_noise=False,\n"
" name=\"foo\")# Stuff after\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" noise='uniform' if (False) else 'gaussian',\n"
" name=\"foo\")# Stuff after\n")
text = "tf.image.extract_glimpse(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertEqual(errors, [])
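
  # tf.nn.dropout: keep_prob is replaced by rate=1 - (keep_prob); a call with
  # no arguments cannot be converted and should produce an error instead.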
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (keep_prob), name=\"foo\")\n",
)
text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
)
text = (
"tf.nn.dropout(x, # Stuff before\n"
" keep_prob=.4, # Stuff after\n"
" name=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, # Stuff before\n"
" rate=1 - (.4), # Stuff after\n"
" name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (1 - func(3 + 4.)), name=\"foo\")\n",
)
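
  # tf.contrib.layers / slim regularizers map onto tf.keras.regularizers;
  # note the l2 scale is rewritten as 0.5 * (scale), and any `scope` argument
  # is dropped with a "Dropping scope" note in the report.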
def testContribL1(self):
text = "tf.contrib.layers.l1_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l1_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l1_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1( # Stuff before\n"
" l=.4)\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2(self):
text = "tf.contrib.layers.l2_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l2_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l2_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2( # Stuff before\n"
" l=0.5 * (.4))\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2Expr(self):
text = "tf.contrib.layers.l2_regularizer(1 - func(3 + 4.), scope=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (1 - func(3 + 4.)))\n",
)
def testMathCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testCountNonZeroChanges(self):
text = (
"tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
text1 = "tf.random_poisson(lam, shape, dtype)"
text2 = "tf.random.poisson(lam, shape, dtype)"
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
_, unused_report, unused_errors, new_text1 = self._upgrade(text1)
self.assertEqual(new_text1, expected_text)
_, unused_report, unused_errors, new_text2 = self._upgrade(text2)
self.assertEqual(new_text2, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def test_substr(self):
text = "tf.substr(input, pos, len, name, unit)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "
"name=name, unit=unit)\n", new_text)
self.assertEqual(errors, [])
def testColocateGradientsWithOps(self):
text = "tf.gradients(yx=a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.gradients(yx=a)\n", new_text)
self.assertIn("tf.gradients no longer takes", report)
text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"
expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "
"gate_gradients=gate)\n")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def testColocateGradientsWithOpsMinimize(self):
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.minimize(a)\n", new_text)
self.assertIn("Optimizer.minimize no longer takes", report)
def testColocateGradientsWithOpsComputeGradients(self):
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
self.assertIn("Optimizer.compute_gradients no longer takes", report)
def testColocateGradientsWithHessians(self):
text = "tf.hessians(ys=a, xs=b, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.hessians(ys=a, xs=b)\n", new_text)
self.assertIn("tf.hessians no longer takes", report)
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testAutograph(self):
text = "tf.autograph.to_graph(f, True, arg_values=None, arg_types=None)"
expected_text = "tf.autograph.to_graph(f, True)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.autograph.to_code"
"(f, False, arg_values=None, arg_types=None, indentation=' ')")
expected_text = "tf.autograph.to_code(f, False)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
text = "tf.estimator.inputs.numpy_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.estimator.inputs.pandas_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testKerasSavedModel(self):
text = (
"tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"
"tf.contrib.saved_model.load_keras_model(saved_model_path)\n")
expected_text = (
"tf.compat.v1.keras.experimental.export_saved_model(model, "
"'./saved_models')\ntf.compat.v1.keras.experimental."
"load_from_saved_model(saved_model_path)\n"
)
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
expected_info = "Please use model.save"
self.assertIn(expected_info, report)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = (
"tf.nn.softmax_cross_entropy_with_logits_v2("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, axis=2)")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertFalse(errors)
def testSoftMaxCrossEntropyWithLogits(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo(bar))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo().zz())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo().zz()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keepdims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dims=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2D(self):
text = (
"tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "
"data_format)")
expected_text = (
"tf.nn.conv2d(input=input, filters=filter, strides=strides, "
"padding=padding, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "
"use_cudnn_on_gpu=use_cudnn_on_gpu)")
expected_text = ("tf.nn.conv2d(input=input, filters=filter, "
"strides=strides, padding=padding)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropFilter(self):
text = (
"tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "
"out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropInput(self):
text = (
"tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "
"input=out_backprop, strides=strides, padding=padding, "
"data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_shape=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPrint(self):
# tf.print() cannot be parsed unless we import print_function
text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text) # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
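
  # Dataset iterator creation: both the instance methods
  # (dataset.make_one_shot_iterator()) and the tf.data module functions are
  # normalized to the tf.compat.v1.data.make_*_iterator(dataset, ...) form.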
def testIterators(self):
for (text, expected) in [
("(expr + yielding(data)).make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),
("dataset.make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("dataset.make_one_shot_iterator(shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("dataset.make_one_shot_iterator(x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("dataset.make_initializable_iterator()",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("ds.make_initializable_iterator(shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("dataset.make_initializable_iterator(x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testStructure(self):
for (text, expected) in [
("tf.data.experimental.DatasetStructure", "tf.data.DatasetSpec"),
("tf.data.experimental.OptionalStructure", "tf.OptionalSpec"),
("tf.data.experimental.RaggedTensorStructure", "tf.RaggedTensorSpec"),
("tf.data.experimental.SparseTensorStructure", "tf.SparseTensorSpec"),
("tf.data.experimental.Structure", "tf.TypeSpec"),
("tf.data.experimental.TensorArrayStructure", "tf.TensorArraySpec"),
("tf.data.experimental.TensorStructure", "tf.TensorSpec"),
]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testMapAndBatch(self):
suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"
text = "tf" + suffix
expected = "tf.compat.v1" + suffix
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
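
  # tf.to_<dtype>(x, name) becomes tf.cast(x, name=..., dtype=tf.<dtype>),
  # whether the name is passed as a keyword or positionally.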
def testCast(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = [
"tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()
]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
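
  # The v1 assert_* helpers (and their tf.debugging aliases) are moved under
  # tf.compat.v1, and the report should flag each renamed symbol.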
def testAssertStatements(self):
for name in [
"assert_greater", "assert_equal", "assert_none_equal", "assert_less",
"assert_negative", "assert_positive", "assert_non_negative",
"assert_non_positive", "assert_near", "assert_less",
"assert_less_equal", "assert_greater", "assert_greater_equal",
"assert_scalar"
]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def testAssertRankStatements(self):
for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def test_assert_equal_graph_def(self):
text = ("tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x, "
"hash_table_shared_name=y)")
expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_upgrade(self):
text = "tf.contrib.framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_direct_import_upgrade(self):
text = "contrib_framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_CriticalSection_upgrade(self):
text = "tf.contrib.framework.CriticalSection(shared_name='blah')"
expected = "tf.CriticalSection(shared_name='blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_sample_distorted_bounding_box(self):
# pylint: disable=line-too-long
text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"
expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_initialize(self):
text = "tf.contrib.summary.initialize"
expected = "tf.compat.v1.summary.initialize"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_framework_argsort(self):
text = "tf.contrib.framework.argsort"
expected = "tf.argsort"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_flags_bare(self):
_, _, errors, _ = self._upgrade("tf.flags")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_flags_flags(self):
_, _, errors, _ = self._upgrade("tf.flags.FLAGS")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_contrib_estimator_head_deprecation(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
api_symbols = ["binary_classification_head", "logistic_regression_head",
"multi_class_head", "multi_head", "multi_label_head",
"poisson_regression_head", "regression_head"]
for symbol in api_symbols:
text = contrib_alias + "estimator." + symbol
_, report, _, _ = self._upgrade(text)
self.assertIn("`tf.contrib.estimator.*_head` has been deprecated",
report)
def test_contrib_layers_layer_norm_deprecation(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
_, report, _, _ = self._upgrade(contrib_alias + "layers.layer_norm")
self.assertIn(
"`tf.contrib.layers.layer_norm` has been deprecated", report)
def test_contrib_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.rnn")
self.assertIn("tf.contrib.rnn.* has been deprecated", report)
def test_contrib_cudnn_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.cudnn_rnn")
self.assertIn("tf.contrib.cudnn_rnn.* has been deprecated", report)
def test_max_pool_2d(self):
text = "tf.nn.max_pool(value=4)"
expected_text = "tf.nn.max_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_estimator_early_stopping(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
api_symbols = [
"make_early_stopping_hook", "stop_if_higher_hook",
"stop_if_lower_hook",
"stop_if_no_decrease_hook", "stop_if_no_increase_hook"
]
for symbol in api_symbols:
text = contrib_alias + "estimator." + symbol
expected_text = "tf.estimator.experimental." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_cell(self):
api_symbols = ["RNNCell", "BasicLSTMCell", "BasicRNNCell", "GRUCell",
"LSTMCell", "MultiRNNCell"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn.rnn_cell." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_function(self):
api_symbols = ["static_rnn", "static_state_saving_rnn",
"static_bidirectional_rnn"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_summary_generic(self):
text = "tf.contrib.summary.generic('foo', myval, meta, 'fam', 42)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"metadata=meta, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# Arg errors come in alphabetical order of arguments, not appearance order.
self.assertIn("'family' argument", errors[0])
self.assertIn("'name' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio(self):
text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram(self):
text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image(self):
text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'bad_color' argument", errors[0])
self.assertIn("'family' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_scalar(self):
text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_generic_nostep(self):
text = "tf.contrib.summary.generic('foo', myval)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("'step' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio_nostep(self):
text = "tf.contrib.summary.audio('foo', myval, 44100)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram_nostep(self):
text = "tf.contrib.summary.histogram('foo', myval)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image_nostep(self):
text = "tf.contrib.summary.image('foo', myval)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_scalar_nostep(self):
text = "tf.contrib.summary.scalar('foo', myval)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_graph(self):
text = "tf.contrib.summary.graph(my_graph)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.trace"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_import_event(self):
text = "tf.contrib.summary.import_event(my_event)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.experimental.write_raw_pb"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_flush(self):
text = "tf.contrib.summary.flush(writer=foo)"
expected = "tf.compat.v2.summary.flush(writer=foo)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_create_file_writer(self):
text = ("tf.contrib.summary.create_file_writer('my_logdir', 0, 1000, "
"'.foo', 'shared-name')")
expected = ("tf.compat.v2.summary.create_file_writer(logdir='my_logdir', "
"max_queue=0, flush_millis=1000, filename_suffix='.foo')")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("no longer re-uses existing event files", errors[1])
def test_contrib_summary_always_record_summaries(self):
text = "tf.contrib.summary.always_record_summaries()"
expected = "tf.compat.v2.summary.record_if(True)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_never_record_summaries(self):
text = "tf.contrib.summary.never_record_summaries()"
expected = "tf.compat.v2.summary.record_if(False)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_record_summaries_every_n_global_steps(self):
text = "tf.contrib.summary.record_summaries_every_n_global_steps(10)"
_, _, errors, _ = self._upgrade(text)
expected_error = "replaced by a call to tf.compat.v2.summary.record_if()"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_all_summary_ops(self):
text = "tf.contrib.summary.all_summary_ops()"
expected = "tf.compat.v1.summary.all_v2_summary_ops()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
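
  # End-to-end check that the individual tf.contrib.summary rewrites tested
  # above compose correctly on a small eager-mode summary program.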
def test_contrib_summary_full_example(self):
    def deindent(n, s):
      return "\n".join(line[n:] for line in s.split("\n"))
text = deindent(4, """
import tensorflow as tf
tf.enable_eager_execution()
writer = tf.contrib.summary.create_file_writer(
"/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", 0.42)
tf.contrib.summary.histogram("weights", [1.0, 2.0], step=7)
tf.contrib.summary.flush()
""")
expected = deindent(4, """
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
writer = tf.compat.v2.summary.create_file_writer(
logdir="/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.compat.v2.summary.record_if(True):
tf.compat.v2.summary.scalar(name="loss", data=0.42, step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.histogram(name="weights", data=[1.0, 2.0], step=7)
tf.compat.v2.summary.flush()
""")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_summary_api_warning(self):
text = "tf.summary.scalar('foo', 42)"
_, report, _, _ = self._upgrade(text)
expected_info = "TF 1.x summary API cannot be automatically migrated"
self.assertIn(expected_info, report)
def test_avg_pool_2d(self):
text = "tf.nn.avg_pool(value=4)"
expected_text = "tf.nn.avg_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_saved_model_load(self):
text = "tf.saved_model.load(sess, ['foo_graph'])"
expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_saved_model_load_v2(self):
text = "tf.saved_model.load_v2('/tmp/blah')"
expected = "tf.compat.v2.saved_model.load('/tmp/blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_app_flags(self):
text = "flags = tf.app.flags"
expected = "flags = tf.compat.v1.app.flags"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_uniform_unit_scaling_initializer(self):
text = "tf.uniform_unit_scaling_initializer(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.initializers.uniform_unit_scaling(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_name_scope(self):
text = "tf.name_scope(None, default_name, [some, values])"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(default_name=default_name, values=stuff)"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(name=n, default_name=d, values=s)"
expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("`name` passed to `name_scope`", report)
text = "tf.name_scope(name=None, values=stuff)"
_, _, errors, _ = self._upgrade(text)
self.assertIn("name_scope call with neither name nor default_name",
errors[0])
@parameterized.parameters(
# Rename parameter: delimiter -> sep and add .to_sparse()
["tf.string_split('test', delimiter=' ')",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Rename parameter: source -> input
["tf.strings.split(source='test1')",
"tf.strings.split(input='test1').to_sparse()"],
# Use compat.v1 for skip_empty parameter.
["tf.string_split('test', ' ', True)",
"tf.compat.v1.string_split(source='test', sep=' ', skip_empty=True)"],
["tf.string_split('test', ' ', skip_empty=False)",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Split behavior for sep=None changed. (In particular, it now splits on
# all whitespace, not just the space character)
["tf.string_split(x)",
"tf.compat.v1.string_split(source=x)"],
# Split behavior for sep='' changed:
["tf.string_split(x, '')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, sep='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, delimiter='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, '', result_type='RaggedTensor')",
"tf.strings.bytes_split(input=x)"],
# If sep is a variable, we can't tell if it's empty:
["tf.string_split(x, sep)",
"tf.compat.v1.string_split(source=x, sep=sep)"],
# If sep is a non-empty string literal, then we don't need compat.v1.
["tf.string_split(x, 'non-empty-sep')",
"tf.strings.split(input=x, sep='non-empty-sep').to_sparse()"],
# Add to_sparse unless result_type is RaggedTensor:
["tf.string_split(x, ' ')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='SparseTensor')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='RaggedTensor')",
"tf.strings.split(input=x, sep=' ')"],
["tf.string_split(x, ' ', result_type=x)",
"tf.compat.v1.string_split(source=x, sep=' ', result_type=x)"],
) # pyformat: disable
# TODO(b/129398290)
def DISABLED_test_string_split(self, text, expected_text):
"""Tests for transforming from tf.string_split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
@parameterized.parameters(
# Add to_sparse unless result_type is RaggedTensor:
["tf.strings.split(x, sep)",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='SparseTensor')",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='RaggedTensor')",
"tf.strings.split(x, sep)"],
["tf.strings.split(x, sep, result_type=x)",
"tf.compat.v1.strings.split(x, sep, result_type=x)"],
) # pyformat: disable
def test_strings_split(self, text, expected_text):
"""Tests for transforming from tf.strings.split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_sdca_to_raw_ops(self):
text = "tf.train.sdca_fprint(input_tensor)"
expected_text = "tf.raw_ops.SdcaFprint(input=input_tensor)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_fprint(input, name=n)"
expected_text = "tf.raw_ops.SdcaFprint(input=input, name=n)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_shrink_l1(w, l, ll)"
expected_text = "tf.raw_ops.SdcaShrinkL1(weights=w, l1=l, l2=ll)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = (
"tf.train.sdca_optimizer(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)")
expected_text = (
"tf.raw_ops.SdcaOptimizer(sparse_example_indices=a, "
"sparse_feature_indices=b, sparse_feature_values=c, dense_features=d, "
"example_weights=e, example_labels=f, sparse_indices=g, "
"sparse_weights=h, dense_weights=i, example_state_data=j, loss_type=k, "
"l1=l, l2=m, num_loss_partitions=n, num_inner_iterations=o)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
def testXlaExperimental(self):
text = "tf.xla.experimental.jit_scope(0)"
expected_text = "tf.xla.experimental.jit_scope(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.xla.experimental.compile(0)"
expected_text = "tf.xla.experimental.compile(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnErosion2d(self):
text = "tf.nn.erosion2d(v, k, s, r, p)"
expected_text = "tf.nn.erosion2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnDilation2d(self):
text = "tf.nn.dilation2d(v, k, s, r, p)"
expected_text = "tf.nn.dilation2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPywrapTensorflowWarning(self):
text = "tf.pywrap_tensorflow.foo()"
expected = "tf.pywrap_tensorflow.foo()"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("`tf.pywrap_tensorflow` will not be distributed", errors[0])
def testKerasSaveModelFormat(self):
text = "tf.keras.models.save_model(model, path)"
expected_text = "tf.keras.models.save_model(model, path, save_format='h5')"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertNotIn(
"saves to the Tensorflow SavedModel format by default", report)
_, report, _, _ = self._upgrade("model.save(path)")
self.assertIn(
"saves to the Tensorflow SavedModel format by default", report)
def test_distribute_strategy(self):
text = "tf.contrib.distribute.CrossDeviceOps()"
expected = "tf.distribute.CrossDeviceOps()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
text = "tf.contrib.distribute.MirroredStrategy"
expected = "tf.contrib.distribute.MirroredStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.MirroredStrategy", errors[0])
text = "tf.distribute.MirroredStrategy"
expected = "tf.distribute.MirroredStrategy"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.distribute.MirroredStrategy API has changed", report)
self.assertIn("make_dataset_iterator->experimental_distribute_dataset",
report)
text = "tf.contrib.distribute.TPUStrategy"
expected = "tf.contrib.distribute.TPUStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.TPUStrategy",
errors[0])
text = "tf.contrib.distribute.foo"
expected = "tf.contrib.distribute.foo"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.contrib.distribute.* have been migrated", report)
def test_decode_raw(self):
text = "tf.io.decode_raw(bytes=[1,2,3], output_dtype=tf.int32)"
expected_text = (
"tf.io.decode_raw(input_bytes=[1,2,3], output_dtype=tf.int32)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testRecomputeGrad(self):
text = "tf.contrib.layers.recompute_grad()"
expected = "tf.recompute_grad()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_load_variable(self):
text = "tf.contrib.framework.load_variable('a')"
expected_text = (
"tf.train.load_variable('a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.contrib.framework.load_variable(checkpoint_dir='a')"
expected_text = (
"tf.train.load_variable(ckpt_dir_or_file='a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_import_rename_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = "import tensorflow.compat.v2 as tf\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "import tensorflow as tf, other_import as y\n"
text = import_header + old_symbol
new_import_header = "import tensorflow.compat.v2 as tf, other_import as y\n"
expected_text = new_import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow.compat.v2 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow.compat.v1 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow.compat.v2 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(
text, import_rename=True, upgrade_compat_v1_import=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow.compat.v1 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(
text, import_rename=False, upgrade_compat_v1_import=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow import foo\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2 import foo\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow import *\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2 import *\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow.foo import bar\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2.foo import bar\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("from tensorflow import foo as tf\n"
"from tensorflow.compat import v1 as tf_v1\n"
"from tensorflow.compat import v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("from tensorflow.compat.v2 import foo as tf\n"
"from tensorflow.compat import v1 as tf_v1\n"
"from tensorflow.compat import v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(text, import_rename=True)
self.assertEqual(new_text, expected_text)
def test_import_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
# We upgrade the base un-versioned tensorflow aliased as tf
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
# We don't handle unaliased tensorflow imports currently,
# so the upgrade script should log errors
import_header = "import tensorflow\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("unaliased `import tensorflow`", "\n".join(errors))
# Upgrading explicitly-versioned tf code is unsafe, but we don't
# need to throw errors when we detect explicitly-versioned tf.
import_header = "import tensorflow.compat.v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf, v2 as tf2\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "import tensorflow.compat.v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf1, v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
def test_api_spec_reset_between_files(self):
for old_symbol, new_symbol in [
("tf.conj(a)", "tf.math.conj(a)"),
("tf.to_int32(x)", "tf.cast(x, dtype=tf.int32)")]:
## Test that the api spec is reset in between files:
import_header = "import tensorflow.compat.v2 as tf\n"
text_a = import_header + old_symbol
expected_text_a = import_header + old_symbol
text_b = old_symbol
expected_text_b = new_symbol
results = self._upgrade_multiple([text_a, text_b])
result_a, result_b = results[0], results[1]
self.assertEqual(result_a[3], expected_text_a)
self.assertEqual(result_b[3], expected_text_b)
def test_model_to_estimator_checkpoint_warning(self):
text = "tf.keras.estimator.model_to_estimator(model)"
_, report, _, _ = self._upgrade(text)
expected_info = "will save object-based checkpoints"
self.assertIn(expected_info, report)
def test_keras_experimental_export_warning(self):
text = "tf.keras.experimental.export_saved_model"
_, report, _, _ = self._upgrade(text)
expected_info = "Please use model.save"
self.assertIn(expected_info, report)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
def testInplaceNoOutputChangeOnErrorHandling(self):
"""In place file should not be modified when parsing error is handled."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "print 'a' \n"
upgraded = "print 'a' \n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(
temp_file.name, temp_file.name, no_change_to_outfile_on_error=True)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
def testInplaceEmptyOutputOnError(self):
"""In place file becomes empty when parsing error is not handled."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "print 'a' \n"
upgraded = ""
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
|
the-stack_106_28215 | # Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common logic to assign test cases to CI jobs.
Some background knowledge about Gitlab CI and its usage flow in esp-idf:
* Gitlab CI jobs are static in ``.gitlab-ci.yml``; we can't create test jobs dynamically
* For test jobs running on a DUT, we use ``tags`` to select runners with different test environments
* We have an ``assign_test`` stage, which collects cases and then assigns them to the correct test jobs
* ``assign_test`` will fail if any case cannot be assigned
* with ``assign_test``, we can:
* dynamically filter the test cases we want to run
* alert users if they forget to add CI jobs, and guide them on how to add test jobs
* the last step of ``assign_test`` is to output config files; the test jobs then run these cases
The basic logic to assign test cases is as follows (see the mapping to methods sketched below):
1. search all the cases
2. filter cases (if a filter is specified by @bot)
3. put cases into different groups according to the rules of ``Group``
* try to put them into existing groups
* if that fails, create a new group and add the case
4. parse and filter the test jobs from the CI config file
5. try to assign all groups to jobs according to tags
6. output config files for the jobs
"""
import os
import re
import json
import yaml
from Utility import (CaseConfig, SearchCases, GitlabCIJob)
class Group(object):
MAX_EXECUTION_TIME = 30
MAX_CASE = 15
SORT_KEYS = ["env_tag"]
# The rules for matching CI jobs can differ from the way we want to group test cases.
# For example, when assigning unit test cases, different test cases need to use different test functions,
# so we need to put them into different groups.
# But these groups can still be assigned to jobs with the same tags, as they use the same test environment.
CI_JOB_MATCH_KEYS = SORT_KEYS
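# A hypothetical illustration of that split (the subclass name and the extra
# "test_function" key are made up for this sketch, not part of esp-idf): a
# unit-test subclass could group by test function while still matching CI jobs
# by environment tag only:
#
#   class UnitTestGroup(Group):
#       SORT_KEYS = ["env_tag", "test_function"]   # finer-grained grouping
#       CI_JOB_MATCH_KEYS = ["env_tag"]            # same job matching as before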
def __init__(self, case):
self.execution_time = 0
self.case_list = [case]
self.filters = dict(zip(self.SORT_KEYS, [self._get_case_attr(case, x) for x in self.SORT_KEYS]))
self.ci_job_match_keys = dict(zip(self.CI_JOB_MATCH_KEYS,
[self._get_case_attr(case, x) for x in self.CI_JOB_MATCH_KEYS]))
@staticmethod
def _get_case_attr(case, attr):
# we might use different types for case (dict or test_func);
# this method gets the attribute from a case regardless of its type
return case.case_info[attr]
def accept_new_case(self):
"""
check whether this group is allowed to accept another case
:return: True or False
"""
max_time = (sum([self._get_case_attr(x, "execution_time") for x in self.case_list])
< self.MAX_EXECUTION_TIME)
max_case = (len(self.case_list) < self.MAX_CASE)
return max_time and max_case
def add_case(self, case):
"""
add case to current group
:param case: test case
:return: True if add succeed, else False
"""
added = False
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
break
else:
self.case_list.append(case)
added = True
return added
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
output_data = {
"Filter": self.filters,
"CaseConfig": [{"name": self._get_case_attr(x, "name")} for x in self.case_list],
}
return output_data
class AssignTest(object):
"""
Auto assign tests to CI jobs.
:param test_case_path: path of test case file(s)
:param ci_config_file: path of ``.gitlab-ci.yml``
"""
# subclasses need to rewrite the CI test job pattern to filter all test jobs
CI_TEST_JOB_PATTERN = re.compile(r"^test_.+")
def __init__(self, test_case_path, ci_config_file, case_group=Group):
self.test_case_path = test_case_path
self.test_cases = []
self.jobs = self._parse_gitlab_ci_config(ci_config_file)
self.case_group = case_group
def _parse_gitlab_ci_config(self, ci_config_file):
with open(ci_config_file, "r") as f:
ci_config = yaml.load(f)
job_list = list()
for job_name in ci_config:
if self.CI_TEST_JOB_PATTERN.search(job_name) is not None:
job_list.append(GitlabCIJob.Job(ci_config[job_name], job_name))
return job_list
@staticmethod
def _search_cases(test_case_path, case_filter=None):
"""
:param test_case_path: path that contains the test case folder
:param case_filter: filter for test cases
:return: filtered test case list
"""
test_methods = SearchCases.Search.search_test_cases(test_case_path)
return CaseConfig.filter_test_cases(test_methods, case_filter if case_filter else dict())
def _group_cases(self):
"""
separate all cases into groups according to the group rules. Each group will be executed by one CI job.
:return: test case groups.
"""
groups = []
for case in self.test_cases:
for group in groups:
# add to current group
if group.add_case(case):
break
else:
# create new group
groups.append(self.case_group(case))
return groups
@staticmethod
def _apply_bot_filter():
"""
we support customizing CI tests via the bot.
here we read the bot-provided filter (from the environment) and return a filter which ``_search_cases`` accepts.
:return: filter for searching test cases
"""
bot_filter = os.getenv("BOT_CASE_FILTER")
if bot_filter:
bot_filter = json.loads(bot_filter)
else:
bot_filter = dict()
return bot_filter
def assign_cases(self):
"""
separate test cases into groups and assign them to CI jobs.
:raise AssertionError: if it failed to assign any group to a CI job.
:return: None
"""
failed_to_assign = []
case_filter = self._apply_bot_filter()
self.test_cases = self._search_cases(self.test_case_path, case_filter)
test_groups = self._group_cases()
for group in test_groups:
for job in self.jobs:
if job.match_group(group):
job.assign_group(group)
break
else:
failed_to_assign.append(group)
assert not failed_to_assign
def output_configs(self, output_path):
"""
:param output_path: path to output config files for each CI job
:return: None
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
for job in self.jobs:
job.output_config(output_path)
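# A hedged usage sketch (not part of esp-idf; the paths below are placeholders
# picked for illustration). It only exercises the public methods defined above.
if __name__ == "__main__":
    # Collect cases from a test-case directory, read .gitlab-ci.yml, assign the
    # resulting groups to matching test jobs, then emit one config file per job.
    assigner = AssignTest("test_apps", ".gitlab-ci.yml")
    assigner.assign_cases()  # raises AssertionError if any group has no matching job
    assigner.output_configs("assign_test_output")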
|
the-stack_106_28216 | #!/usr/bin/env python
# coding: utf-8
import nose
import itertools
import os
import string
import warnings
from distutils.version import LooseVersion
from datetime import datetime, date
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.compat import (range, lrange, StringIO, lmap, lzip, u, zip,
iteritems, OrderedDict, PY3)
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import rand, randn
from numpy.testing import assert_array_equal, assert_allclose
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_mpl_14_or_dev_boxplot():
# GH 8382
# Boxplot failures on 1.4 and 1.4.1
# Don't need try / except since that's done at class level
import matplotlib
if str(matplotlib.__version__) >= LooseVersion('1.4'):
raise nose.SkipTest("Matplotlib Regression in 1.4 and current dev.")
def _skip_if_no_scipy_gaussian_kde():
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
raise nose.SkipTest("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde','density']:
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
return False
return True
@tm.mplskip
class TestPlotBase(tm.TestCase):
def setUp(self):
import matplotlib as mpl
mpl.rcdefaults()
n = 100
with tm.RNGContext(42):
gender = tm.choice(['Male', 'Female'], size=n)
classroom = tm.choice(['A', 'B', 'C'], size=n)
self.hist_df = DataFrame({'gender': gender,
'classroom': classroom,
'height': random.normal(66, 4, size=n),
'weight': random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
if str(mpl.__version__) >= LooseVersion('1.4'):
self.bp_n_objects = 7
else:
self.bp_n_objects = 8
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
def tearDown(self):
tm.close()
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@cache_readonly
def colorconverter(self):
import matplotlib.colors as colors
return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
"""
Check each axes has expected legend labels
Parameters
----------
axes : matplotlib Axes object, or its list-like
labels : list-like
expected legend labels
visible : bool
expected legend visibility. labels are checked only when visible is True
"""
if visible and (labels is None):
raise ValueError('labels must be specified when visible is True')
axes = self._flatten_visible(axes)
for ax in axes:
if visible:
self.assertTrue(ax.get_legend() is not None)
self._check_text_labels(ax.get_legend().get_texts(), labels)
else:
self.assertTrue(ax.get_legend() is None)
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
Parameters
----------
xp : matplotlib Axes object
rs : matplotlib Axes object
"""
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_allclose(xpdata, rsdata)
self.assertEqual(len(xp_lines), len(rs_lines))
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
def _check_visible(self, collections, visible=True):
"""
Check each artist is visible or not
Parameters
----------
collections : matplotlib Artist or its list-like
target Artist or its list or collection
visible : bool
expected visibility
"""
from matplotlib.collections import Collection
if not isinstance(collections, Collection) and not com.is_list_like(collections):
collections = [collections]
for patch in collections:
self.assertEqual(patch.get_visible(), visible)
def _get_colors_mapped(self, series, colors):
unique = series.unique()
# the lengths of unique and colors can differ
# depending on the slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
def _check_colors(self, collections, linecolors=None, facecolors=None,
mapping=None):
"""
Check each artist has expected line colors and face colors
Parameters
----------
collections : list-like
list or collection of target artist
linecolors : list-like which has the same length as collections
list of expected line colors
facecolors : list-like which has the same length as collections
list of expected face colors
mapping : Series
Series used as the color grouping key;
used for the andrews_curves, parallel_coordinates, and radviz tests
"""
from matplotlib.lines import Line2D
from matplotlib.collections import Collection
conv = self.colorconverter
if linecolors is not None:
if mapping is not None:
linecolors = self._get_colors_mapped(mapping, linecolors)
linecolors = linecolors[:len(collections)]
self.assertEqual(len(collections), len(linecolors))
for patch, color in zip(collections, linecolors):
if isinstance(patch, Line2D):
result = patch.get_color()
# Line2D may contain a string color expression
result = conv.to_rgba(result)
else:
result = patch.get_edgecolor()
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
if facecolors is not None:
if mapping is not None:
facecolors = self._get_colors_mapped(mapping, facecolors)
facecolors = facecolors[:len(collections)]
self.assertEqual(len(collections), len(facecolors))
for patch, color in zip(collections, facecolors):
if isinstance(patch, Collection):
# returned as list of np.array
result = patch.get_facecolor()[0]
else:
result = patch.get_facecolor()
if isinstance(result, np.ndarray):
result = tuple(result)
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
def _check_text_labels(self, texts, expected):
"""
Check each text has expected labels
Parameters
----------
texts : matplotlib Text object, or its list-like
target text, or its list
expected : str or list-like which has the same length as texts
expected text label, or its list
"""
if not com.is_list_like(texts):
self.assertEqual(texts.get_text(), expected)
else:
labels = [t.get_text() for t in texts]
self.assertEqual(len(labels), len(expected))
for l, e in zip(labels, expected):
self.assertEqual(l, e)
def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
"""
Check each axes has expected tick properties
Parameters
----------
axes : matplotlib Axes object, or its list-like
xlabelsize : number
expected xticks font size
xrot : number
expected xticks rotation
ylabelsize : number
expected yticks font size
yrot : number
expected yticks rotation
"""
from matplotlib.ticker import NullFormatter
axes = self._flatten_visible(axes)
for ax in axes:
if xlabelsize or xrot:
if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
# If minor ticks have a NullFormatter, rot / fontsize are not retained
labels = ax.get_xticklabels()
else:
labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
for label in labels:
if xlabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(), xlabelsize)
if xrot is not None:
self.assertAlmostEqual(label.get_rotation(), xrot)
if ylabelsize or yrot:
if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
labels = ax.get_yticklabels()
else:
labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
for label in labels:
if ylabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(), ylabelsize)
if yrot is not None:
self.assertAlmostEqual(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
"""
Check each axes has expected scales
Parameters
----------
axes : matplotlib Axes object, or its list-like
xaxis : {'linear', 'log'}
expected xaxis scale
yaxis : {'linear', 'log'}
expected yaxis scale
"""
axes = self._flatten_visible(axes)
for ax in axes:
self.assertEqual(ax.xaxis.get_scale(), xaxis)
self.assertEqual(ax.yaxis.get_scale(), yaxis)
def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=(8.0, 6.0)):
"""
Check expected number of axes is drawn in expected layout
Parameters
----------
axes : matplotlib Axes object, or its list-like
axes_num : number
expected number of axes. Unnecessary axes should be set to invisible.
layout : tuple
expected layout: (expected number of rows, columns)
figsize : tuple
expected figsize. default is matplotlib default
"""
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
self.assertEqual(len(visible_axes), axes_num)
for ax in visible_axes:
# check something drawn on visible axes
self.assertTrue(len(ax.get_children()) > 0)
if layout is not None:
result = self._get_axes_layout(plotting._flatten(axes))
self.assertEqual(result, layout)
self.assert_numpy_array_equal(np.round(visible_axes[0].figure.get_size_inches()),
np.array(figsize))
def _get_axes_layout(self, axes):
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
def _flatten_visible(self, axes):
"""
Flatten axes, and filter only visible
Parameters
----------
axes : matplotlib Axes object, or its list-like
"""
axes = plotting._flatten(axes)
axes = [ax for ax in axes if ax.get_visible()]
return axes
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
"""
Check axes has expected number of errorbars
Parameters
----------
axes : matplotlib Axes object, or its list-like
xerr : number
expected number of x errorbar
yerr : number
expected number of y errorbar
"""
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
xerr_count = 0
yerr_count = 0
for c in containers:
has_xerr = getattr(c, 'has_xerr', False)
has_yerr = getattr(c, 'has_yerr', False)
if has_xerr:
xerr_count += 1
if has_yerr:
yerr_count += 1
self.assertEqual(xerr, xerr_count)
self.assertEqual(yerr, yerr_count)
def _check_box_return_type(self, returned, return_type, expected_keys=None,
check_ax_title=True):
"""
Check box returned type is correct
Parameters
----------
returned : object to be tested, returned from boxplot
return_type : str
return_type passed to boxplot
expected_keys : list-like, optional
group labels in subplot case. If not passed,
the function checks assuming boxplot uses single ax
check_ax_title : bool
Whether to check that ax.title is the same as expected_key.
Intended to be used when calling from ``boxplot``;
normal ``plot`` doesn't attach ``ax.title``, so this check must be disabled there.
"""
from matplotlib.axes import Axes
types = {'dict': dict, 'axes': Axes, 'both': tuple}
if expected_keys is None:
# should be fixed when the returning default is changed
if return_type is None:
return_type = 'dict'
self.assertTrue(isinstance(returned, types[return_type]))
if return_type == 'both':
self.assertIsInstance(returned.ax, Axes)
self.assertIsInstance(returned.lines, dict)
else:
# should be fixed when the returning default is changed
if return_type is None:
for r in self._flatten_visible(returned):
self.assertIsInstance(r, Axes)
return
self.assertTrue(isinstance(returned, OrderedDict))
self.assertEqual(sorted(returned.keys()), sorted(expected_keys))
for key, value in iteritems(returned):
self.assertTrue(isinstance(value, types[return_type]))
# check returned dict has correct mapping
if return_type == 'axes':
if check_ax_title:
self.assertEqual(value.get_title(), key)
elif return_type == 'both':
if check_ax_title:
self.assertEqual(value.ax.get_title(), key)
self.assertIsInstance(value.ax, Axes)
self.assertIsInstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
if check_ax_title:
self.assertEqual(line.get_axes().get_title(), key)
else:
raise AssertionError
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.ts.plot, kind='area', stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot, kind='barh')
ax = _check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
# figsize and title
ax = self.series.plot(title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
colors = self.plt.rcParams['axes.color_cycle']
Series([1, 2, 3]).plot()
self.assertEqual(colors, self.plt.rcParams['axes.color_cycle'])
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
tm.close()
ax = self.ts.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
def test_ts_area_lim(self):
ax = self.ts.plot(kind='area', stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
# GH 7471
ax = self.ts.plot(kind='area', stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot(kind='area', stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
ax = tz_ts.plot(kind='area', stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
self.assert_numpy_array_equal(np.delete(masked.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot, kind='area')
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot, kind='area', stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
ax = s.plot(use_index=False)
label = ax.get_xlabel()
self.assertEqual(label, '')
ax2 = s.plot(kind='bar', use_index=False)
label2 = ax2.get_xlabel()
self.assertEqual(label2, '')
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
ax = df.plot(kind='bar', use_index=False)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
axes = df.plot()
self._check_ticks_props(axes, xrot=0)
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_pie_series(self):
# if the sum of values is less than 1.0, pie handles them as rates and draws a semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot, kind='pie')
self._check_text_labels(ax.texts, series.index)
self.assertEqual(ax.get_ylabel(), 'YLABEL')
# without wedge labels
ax = _check_plot_works(series.plot, kind='pie', labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with fewer colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot, kind='pie', colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot, kind='pie', labels=labels, colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot, kind='pie', colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
self.assertEqual(t.get_fontsize(), 7)
# includes negative value
with tm.assertRaises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot(kind='pie')
# includes nan
series = Series([1, 2, np.nan, 4],
index=['a', 'b', 'c', 'd'], name='YLABEL')
ax = _check_plot_works(series.plot, kind='pie')
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
ax = s.plot(kind='pie', legend=True)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.plot(kind='hist', bins=5)
self.assertEqual(len(ax.patches), 10)
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
_check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
self.assertEqual(len(ax.patches), 2)
@slow
def test_hist_layout(self):
df = self.hist_df
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender)
self.assertEqual(len(self.plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_hist_kde(self):
ax = self.ts.plot(kind='hist', logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
ax = _check_plot_works(s.plot, kind='kde')
@slow
def test_hist_kwargs(self):
ax = self.ts.plot(kind='hist', bins=5)
self.assertEqual(len(ax.patches), 5)
self._check_text_labels(ax.yaxis.get_label(), 'Degree')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot(kind='hist', orientation='horizontal')
self._check_text_labels(ax.xaxis.get_label(), 'Degree')
tm.close()
ax = self.ts.plot(kind='hist', align='left', stacked=True)
tm.close()
@slow
def test_hist_kde_color(self):
ax = self.ts.plot(kind='hist', logy=True, bins=10, color='b')
self._check_ax_scales(ax, yaxis='log')
self.assertEqual(len(ax.patches), 10)
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ax = self.ts.plot(kind='kde', logy=True, color='r')
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self._check_colors(lines, ['r'])
@slow
def test_boxplot_series(self):
ax = self.ts.plot(kind='box', logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
ax = autocorrelation_plot(self.ts, label='Test')
self._check_legend_labels(ax, labels=['Test'])
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with tm.assertRaises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz']*10
with tm.assertRaises(TypeError):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@tm.mplskip
class TestDataFramePlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20)})
from pandas import read_csv
path = os.path.join(curpath(), 'data', 'iris.csv')
self.iris = read_csv(path)
@slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, grid=False)
axes = _check_plot_works(df.plot, subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.plot, subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
with tm.assertRaises(TypeError):
df.plot(kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots, once without an ax
# kwarg and once with an ax kwarg, and the new sharex behaviour does not remove the
# visibility of the latter axis (as ax is present).
# see: https://github.com/pydata/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
#axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot, kind='bar', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot, kind='bar', subplots=True,
layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot(kind='bar', subplots=True, ax=ax)
self.assertEqual(len(axes), 1)
self.assertIs(ax.get_axes(), axes[0])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
self.assertEqual(df.index.name, 'NAME')
@slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)},
index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64)
tm.assert_series_equal(rs, df.y)
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
self.assertEqual(axes.shape, (3, ))
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax, labels=[com.pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
self.assertTrue(ax.get_legend() is None)
@slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
@slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
self.assertEqual(axes.shape, (4, 1))
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(1, 1))
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self.assertEqual(axes.shape, (1, ))
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
self.assertEqual(axes.shape, (3, 3))
@slow
def test_subplots_warnings(self):
# GH 9464
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
@slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assertEqual(returned.shape, (3, ))
self.assertIs(returned[0].figure, fig)
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assertEqual(returned.shape, (3, ))
self.assertIs(returned[0].figure, fig)
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with tm.assertRaises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
# an invalid layout should not affect the input or the return value
# (the warning itself is tested in
# TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self.assertEqual(axes.shape, (1, ))
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(np.random.randn(10, 9), index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than the bottom row should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with tm.assertRaises(ValueError):
df.plot(kind='area', logy=True)
with tm.assertRaises(ValueError):
df.plot(kind='area', loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
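# Helper: each stacked line's y-data should equal the running sum of the
# corresponding unstacked lines' y-data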
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
self.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4),
columns=['w', 'x', 'y', 'z'])
neg_df = - df
# each column has either positive or negative value
sep_df = DataFrame({'w': rand(6), 'x': rand(6),
'y': - rand(6), 'z': - rand(6)})
# each column has positive-negative mixed value
mixed_df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with tm.assertRaises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1, 'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove nan for comparison purposes
self.assert_numpy_array_equal(np.delete(masked1.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(np.delete(masked2.data, 1), np.array([3, 2, 1]))
self.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False]))
self.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3])
expected2 = np.array([3, 0, 2, 1])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(d.plot, kind='area')
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(d.plot, kind='area', stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
axes = df.plot(secondary_y=True, subplots=True)
for ax in axes:
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
def test_area_lim(self):
df = DataFrame(rand(6, 4),
columns=['x', 'y', 'z', 'four'])
neg_df = - df
for stacked in [True, False]:
ax = _check_plot_works(df.plot, kind='area', stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
self.assertEqual(ymin, 0)
ax = _check_plot_works(neg_df.plot, kind='area', stacked=stacked)
ymin, ymax = ax.get_ylim()
self.assertEqual(ymax, 0)
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = plt.rcParams.get('axes.color_cycle')
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot(kind='bar', color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot(kind='bar', width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width / len(df.columns))
# stacked
ax = df.plot(kind='bar', stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal regular
ax = df.plot(kind='barh', width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width / len(df.columns))
# horizontal stacked
ax = df.plot(kind='barh', stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width)
# subplots
axes = df.plot(kind='bar', width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal subplots
axes = df.plot(kind='barh', width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_height(), width)
@slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9, position=0.2)
@slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot(kind='bar', stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
self.assertEqual(result, [1] * 25)
ax = df.plot(kind='bar', stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
self.assertEqual(result, [-1, -2, -3, -4, -5])
ax = df.plot(kind='barh', stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
self.assertEqual(result, [1] * 25)
ax = df.plot(kind='barh', stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
self.assertEqual(result, [1, 2, 3, 4, 5])
axes = df.plot(kind='bar', subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
self.assertEqual(result, [-1] * 5)
axes = df.plot(kind='barh', subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
self.assertEqual(result, [1] * 5)
@slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20], 'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot(kind='bar')
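# NaN entries are drawn as zero-height bars, hence the 0 expected for column 'A'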
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
self.assertEqual(result, expected)
ax = df.plot(kind='bar', stacked=True)
result = [p.get_height() for p in ax.patches]
self.assertEqual(result, expected)
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
self.assertEqual(result, expected)
@slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot, x='x', y='y', kind='scatter')
_check_plot_works(df.plot, x=1, y=2, kind='scatter')
with tm.assertRaises(ValueError):
df.plot(x='x', kind='scatter')
with tm.assertRaises(ValueError):
df.plot(y='y', kind='scatter')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot(kind='scatter', x='x', y='y', c='z'),
df.plot(kind='scatter', x=0, y=1, c=2)]
for ax in axes:
# default to Greys
self.assertEqual(ax.collections[0].cmap.name, 'Greys')
if self.mpl_ge_1_3_1:
# n.b. there appears to be no public method to get the colorbar
# label
self.assertEqual(ax.collections[0].colorbar._label, 'z')
cm = 'cubehelix'
ax = df.plot(kind='scatter', x='x', y='y', c='z', colormap=cm)
self.assertEqual(ax.collections[0].cmap.name, cm)
# verify turning off colorbar works
ax = df.plot(kind='scatter', x='x', y='y', c='z', colorbar=False)
self.assertIs(ax.collections[0].colorbar, None)
# verify that we can still plot a solid color
ax = df.plot(x=0, y=1, c='red', kind='scatter')
self.assertIs(ax.collections[0].colorbar, None)
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
# See https://github.com/pydata/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot(kind='scatter', x='A', y='B', c=rgba_array)
# expect the face colors of the points in the non-colormap path to be
# identical to the values we supplied; normally we'd be on shaky ground
# comparing floats for equality, but here we expect them to be
# identical.
self.assertTrue(
np.array_equal(
ax.collections[0].get_facecolor(),
rgba_array))
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot(kind='scatter', x='A', y='B', c=float_array, cmap='spring')
@slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot, kind='bar')
_check_plot_works(df.plot, kind='bar', legend=False)
_check_plot_works(df.plot, kind='bar', subplots=True)
_check_plot_works(df.plot, kind='bar', stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot, kind='bar')
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot, kind='bar')
self._check_ticks_props(ax, xrot=90)
ax = df.plot(kind='bar', rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot, kind='barh')
self._check_ticks_props(ax, yrot=0)
ax = df.plot(kind='barh', rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center',
width=0.5, position=0.5):
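# Helper: plot df as bar/barh and check that the first bar's center
# (align='center') or edge (align='edge') coincides with the first tick,
# and that the axis limits leave a 0.25 margin beyond the outermost bars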
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position,
grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min([p.get_x() for p in ax.patches])
max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min([p.get_y() for p in ax.patches])
max_edge = max([p.get_y() + p.get_height() for p in ax.patches])
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
self.assertAlmostEqual(ax_min, min_edge - 0.25)
self.assertAlmostEqual(ax_max, max_edge + 0.25)
p = ax.patches[0]
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(df.columns) * position
edge = p.get_y()
else:
raise ValueError
# Check that the ticks are located on integers
self.assertTrue((axis.get_ticklocs() == np.arange(len(df))).all())
if align == 'center':
# Check whether the bar is centered on the tick
self.assertAlmostEqual(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
self.assertAlmostEqual(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True,
width=0.9, align='edge')
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
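# on matplotlib versions newer than 1.2.1 the expected tick locations
# include an extra decade at each end (.1 and 100)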
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True, log=True)
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot(log=True, kind='bar',
subplots=True)
assert_array_equal(ax[0].yaxis.get_ticklocs(), expected)
assert_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot, kind='box')
self._check_text_labels(ax.get_xticklabels(), labels)
assert_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
self.assertEqual(len(ax.lines),
self.bp_n_objects * len(numeric_cols))
# different warning on py3
if not PY3:
axes = _check_plot_works(df.plot, kind='box',
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
self.assertEqual(len(ax.lines), self.bp_n_objects)
axes = series.plot(kind='box', rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot, kind='box')
positions = np.array([1, 6, 7])
ax = df.plot(kind='box', positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
assert_array_equal(ax.xaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot(kind='box', rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
axes = _check_plot_works(df.plot, kind='box', subplots=True,
vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
self.assertEqual(len(ax.lines), self.bp_n_objects)
positions = np.array([3, 2, 8])
ax = df.plot(kind='box', positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
assert_array_equal(ax.yaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with tm.assertRaises(ValueError):
df.plot(kind='box', return_type='NOTATYPE')
result = df.plot(kind='box', return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot(kind='box', return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot(kind='box', return_type='both')
self._check_box_return_type(result, 'both')
@slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot(kind='box', subplots=True)
self.assertIsInstance(result, np.ndarray)
self._check_box_return_type(result, None,
expected_keys=['height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot(kind='box', return_type=t, subplots=True)
self._check_box_return_type(returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@slow
def test_boxplot_legacy(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot, return_type='dict')
_check_plot_works(df.boxplot, column=['one', 'two'], return_type='dict')
_check_plot_works(df.boxplot, column=['one', 'two'], by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting.boxplot, df['one'], return_type='dict')
_check_plot_works(df.boxplot, notch=1, return_type='dict')
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
df['Y'] = Series(['A'] * 10)
_check_plot_works(df.boxplot, by='X')
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot('Col1', by='X', ax=ax)
self.assertIs(ax.get_axes(), axes)
fig, ax = self.plt.subplots()
axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')
self.assertIs(ax.get_axes(), axes['A'])
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
axes = df.boxplot(column=['Col1', 'Col2'], by='X', ax=ax, return_type='axes')
self.assertIs(axes['Col1'].get_figure(), fig)
# When by is None, check that all relevant lines are present in the dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type='dict')
lines = list(itertools.chain.from_iterable(d.values()))
self.assertEqual(len(ax.get_lines()), len(lines))
@slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pydata/pandas/pull/7096
import matplotlib as mpl
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with tm.assertRaises(ValueError):
df.boxplot(return_type='NOTATYPE')
with tm.assert_produces_warning(FutureWarning):
result = df.boxplot()
# change to Axes in future
self._check_box_return_type(result, 'dict')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='dict')
self._check_box_return_type(result, 'dict')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='axes')
self._check_box_return_type(result, 'axes')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='both')
self._check_box_return_type(result, 'both')
@slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
self.assertTrue(y_min <= col.min())
self.assertTrue(y_max >= col.max())
df = self.hist_df.copy()
df['age'] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
self.assertEqual(weight_ax._sharey, height_ax)
# Two rows, one partial
p = df.boxplot(['height', 'weight', 'age'], by='category')
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
_check_ax_limits(df['age'], age_ax)
self.assertEqual(weight_ax._sharey, height_ax)
self.assertEqual(age_ax._sharey, height_ax)
self.assertIsNone(dummy_ax._sharey)
@slow
def test_boxplot_empty_column(self):
_skip_if_mpl_14_or_dev_boxplot()
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type='axes')
@slow
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [com.pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
axes = _check_plot_works(df.plot, kind='kde', subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
ax = _check_plot_works(df.plot, kind='kde')
@slow
def test_hist_df(self):
if self.mpl_le_1_2_1:
raise nose.SkipTest("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot, kind='hist')
expected = [com.pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
axes = _check_plot_works(df.plot, kind='hist', subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot(kind='hist', rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = series.plot(kind='hist', normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0)
tm.close()
ax = series.plot(kind='hist', cumulative=True, bins=4)
self.assertAlmostEqual(ax.get_children()[5].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot(kind='hist', rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
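# Helper: compare the geometry (y/height/x/width) of the given bar patches
# against the expected arrays; expectations left as None are skipped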
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
if expected_y is not None:
self.assert_numpy_array_equal(result_y, expected_y)
if expected_h is not None:
self.assert_numpy_array_equal(result_height, expected_h)
if expected_x is not None:
self.assert_numpy_array_equal(result_x, expected_x)
if expected_w is not None:
self.assert_numpy_array_equal(result_width, expected_w)
@slow
def test_hist_df_coord(self):
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
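# bin heights should match the repeat counts used above; NaN values are
# dropped before binning, so nan_df is expected to produce the same patches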
for df in [normal_df, nan_df]:
ax = df.plot(kind='hist', bins=5)
self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot(kind='hist', bins=5, stacked=True)
self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot(kind='hist', bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
if self.mpl_ge_1_3_1:
# horizontal
ax = df.plot(kind='hist', bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot(kind='hist', bins=5, stacked=True, orientation='horizontal')
self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot(kind='hist', bins=5, stacked=True,
subplots=True, orientation='horizontal')
self._check_box_coord(axes[0].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@slow
def test_hist_df_legacy(self):
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 3))
axes = _check_plot_works(df.hist, grid=False)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertFalse(axes[1, 1].get_visible())
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
axes = _check_plot_works(df.hist, layout=(4, 2))
self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
_check_plot_works(df.hist, figsize=(8, 10))
# check bins argument
_check_plot_works(df.hist, bins=5)
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self._check_ax_scales(ax, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with tm.assertRaises(AttributeError):
ser.hist(foo='bar')
@slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
{'layout': (-1, 4), 'expected_size': (1, 4)},
{'layout': (4, -1), 'expected_size': (4, 1)},
{'layout': (-1, 2), 'expected_size': (2, 2)},
{'layout': (2, -1), 'expected_size': (2, 2)}
)
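# a -1 in either dimension means that dimension is inferred from the
# number of plots (3 here), as encoded in the expected sizes above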
for layout_test in layout_to_expected_size:
axes = df.hist(layout=layout_test['layout'])
expected = layout_test['expected_size']
self._check_axes_shape(axes, axes_num=3, layout=expected)
# layout too small for all 4 plots
with tm.assertRaises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with tm.assertRaises(ValueError):
df.hist(layout=(1,))
with tm.assertRaises(ValueError):
df.hist(layout=(-1, -1))
@slow
def test_scatter(self):
tm._skip_if_no_scipy()
df = DataFrame(randn(100, 2))
def scat(**kwds):
return plotting.scatter_matrix(df, **kwds)
_check_plot_works(scat)
_check_plot_works(scat, marker='+')
_check_plot_works(scat, vmin=0)
if _ok_for_gaussian_kde('kde'):
_check_plot_works(scat, diagonal='kde')
if _ok_for_gaussian_kde('density'):
_check_plot_works(scat, diagonal='density')
_check_plot_works(scat, diagonal='hist')
_check_plot_works(scat, range_padding=.1)
def scat2(x, y, by=None, ax=None, figsize=None):
return plotting.scatter_plot(df, x, y, by, ax, figsize=None)
_check_plot_works(scat2, 0, 1)
grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index)
_check_plot_works(scat2, 0, 1, by=grouper)
def test_scatter_matrix_axis(self):
tm._skip_if_no_scipy()
scatter_matrix = plotting.scatter_matrix
with tm.RNGContext(42):
df = DataFrame(randn(100, 3))
axes = _check_plot_works(scatter_matrix, df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
# GH 5662
expected = ['-2', '-1', '0', '1', '2']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
df[0] = ((df[0] - 2) / 3)
axes = _check_plot_works(scatter_matrix, df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
expected = ['-1.2', '-1.0', '-0.8', '-0.6', '-0.4', '-0.2', '0.0']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
@slow
def test_andrews_curves(self):
from pandas.tools.plotting import andrews_curves
from matplotlib import cm
df = self.iris
_check_plot_works(andrews_curves, df, 'Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(andrews_curves, df, 'Name', color=rgba)
self._check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
ax = _check_plot_works(andrews_curves, df, 'Name', color=cnames)
self._check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10])
ax = _check_plot_works(andrews_curves, df, 'Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
self._check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10])
colors = ['b', 'g', 'r']
df = DataFrame({"A": [1, 2, 3],
"B": [1, 2, 3],
"C": [1, 2, 3],
"Name": colors})
ax = andrews_curves(df, 'Name', color=colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=colors)
with tm.assert_produces_warning(FutureWarning):
andrews_curves(data=df, class_column='Name')
@slow
def test_parallel_coordinates(self):
from pandas.tools.plotting import parallel_coordinates
from matplotlib import cm
df = self.iris
ax = _check_plot_works(parallel_coordinates, df, 'Name')
nlines = len(ax.get_lines())
nxticks = len(ax.xaxis.get_ticklabels())
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(parallel_coordinates, df, 'Name', color=rgba)
self._check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
ax = _check_plot_works(parallel_coordinates, df, 'Name', color=cnames)
self._check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10])
ax = _check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
self._check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10])
ax = _check_plot_works(parallel_coordinates, df, 'Name', axvlines=False)
assert len(ax.get_lines()) == (nlines - nxticks)
colors = ['b', 'g', 'r']
df = DataFrame({"A": [1, 2, 3],
"B": [1, 2, 3],
"C": [1, 2, 3],
"Name": colors})
ax = parallel_coordinates(df, 'Name', color=colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=colors)
with tm.assert_produces_warning(FutureWarning):
parallel_coordinates(data=df, class_column='Name')
with tm.assert_produces_warning(FutureWarning):
parallel_coordinates(df, 'Name', colors=colors)
@slow
def test_radviz(self):
from pandas.tools.plotting import radviz
from matplotlib import cm
df = self.iris
_check_plot_works(radviz, df, 'Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(radviz, df, 'Name', color=rgba)
# skip Circle drawn as ticks
patches = [p for p in ax.patches[:20] if p.get_label() != '']
self._check_colors(patches[:10], facecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
_check_plot_works(radviz, df, 'Name', color=cnames)
patches = [p for p in ax.patches[:20] if p.get_label() != '']
self._check_colors(patches, facecolors=cnames, mapping=df['Name'][:10])
_check_plot_works(radviz, df, 'Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
patches = [p for p in ax.patches[:20] if p.get_label() != '']
self._check_colors(patches, facecolors=cmaps, mapping=df['Name'][:10])
colors = [[0., 0., 1., 1.],
[0., 0.5, 1., 1.],
[1., 0., 0., 1.]]
df = DataFrame({"A": [1, 2, 3],
"B": [2, 1, 3],
"C": [3, 2, 1],
"Name": ['b', 'g', 'r']})
ax = radviz(df, 'Name', color=colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=colors)
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot(kind='scatter', x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot(kind='scatter', x='d', y='e', legend=False,
label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot(kind='scatter', x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
# ensure label args pass through and that neither the index name
# nor the column names mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
self.assertTrue(df5.columns.tolist() == ['b', 'c'])
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^', 1: '+', 2: 'o'},
{0: '^', 1: '+'},
['^', '+', 'o'],
['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
@slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
self.assertEqual(ax.get_legend(), None)
ax = s.plot(legend=True)
self.assertEqual(ax.get_legend().get_texts()[0].get_text(),
'None')
@slow
def test_line_colors(self):
import sys
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tmp = sys.stderr
sys.stderr = StringIO()
try:
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
self.assertEqual(l1.get_color(), l2.get_color())
finally:
sys.stderr = tmp
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.ix[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
@slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot(kind='area', color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
# legend is stored as Line2D, thus check linecolors
self._check_colors(handles, linecolors=custom_colors)
for h in handles:
self.assertTrue(h.get_alpha() is None)
tm.close()
ax = df.plot(kind='area', colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=jet_colors)
for h in handles:
self.assertTrue(h.get_alpha() is None)
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot(kind='area', colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
# Line2D can't have alpha in its linecolor
self._check_colors(handles, linecolors=jet_colors)
for h in handles:
self.assertEqual(h.get_alpha(), 0.5)
@slow
def test_hist_colors(self):
default_colors = self.plt.rcParams.get('axes.color_cycle')
df = DataFrame(randn(5, 5))
ax = df.plot(kind='hist')
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot(kind='hist', color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='hist', colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot(kind='hist', colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot(kind='hist', color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
@slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot(kind='kde', color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot(kind='kde', colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(kind='kde', colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c='b'):
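# bp is the dict returned by return_type='dict'; verify the line color of
# each artist group (boxes, whiskers, medians, fliers, caps)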
self._check_colors(bp['boxes'], linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'], linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'], linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'], linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'], linecolors=[caps_c] * len(bp['caps']))
default_colors = self.plt.rcParams.get('axes.color_cycle')
df = DataFrame(randn(5, 5))
bp = df.plot(kind='box', return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot(kind='box', color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot(kind='box', color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot(kind='box', colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot(kind='box', colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot(kind='box', color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot(kind='box', color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), '#123456')
with tm.assertRaises(ValueError):
# a color dict containing an invalid key results in a ValueError
df.plot(kind='box', color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
plt.rcParams['axes.color_cycle'] = list('rgbk')
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = plt.rcParams['axes.color_cycle'][:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
self.assertTrue(xticks[0] < xticks[1])
ydata = ax.lines[0].get_ydata()
assert_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
df.plot(kind=kind)
@slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with tm.assertRaises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with tm.assertRaises(ValueError):
df.plot(kind='aasdf')
@slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot(kind='hexbin', x='A', y='B', gridsize=10)
# TODO: need better way to test. This just does existence.
self.assertEqual(len(ax.collections), 1)
# GH 6951
axes = df.plot(x='A', y='B', kind='hexbin', subplots=True)
        # hexbin should have 2 axes in the figure, 1 for plotting and another for the colorbar
self.assertEqual(len(axes[0].figure.axes), 2)
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot(kind='hexbin', x='A', y='B', C='C')
self.assertEqual(len(ax.collections), 1)
ax = df.plot(kind='hexbin', x='A', y='B', C='C',
reduce_C_function=np.std)
self.assertEqual(len(ax.collections), 1)
@slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot(kind='hexbin', x='A', y='B')
self.assertEqual(ax.collections[0].cmap.name, 'BuGn')
cm = 'cubehelix'
ax = df.plot(kind='hexbin', x='A', y='B', colormap=cm)
self.assertEqual(ax.collections[0].cmap.name, cm)
@slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot(kind='hexbin', x='A', y='B', colorbar=None)
self.assertIs(ax.collections[0].colorbar, None)
@slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot(kind='hexbin', x='A', y='B', cmap='YlGn')
self.assertEqual(ax.collections[0].cmap.name, 'YlGn')
with tm.assertRaises(TypeError):
df.plot(kind='hexbin', x='A', y='B', cmap='YlGn',
colormap='BuGn')
@slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with tm.assertRaises(ValueError):
df.plot(kind='pie')
ax = _check_plot_works(df.plot, kind='pie', y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot, kind='pie', y=2)
self._check_text_labels(ax.texts, df.index)
axes = _check_plot_works(df.plot, kind='pie', subplots=True)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
self.assertEqual(ax.get_ylabel(), ylabel)
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
axes = _check_plot_works(df.plot, kind='pie', subplots=True,
labels=labels, colors=color_args)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot(kind='pie', subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
# legend labels
# NaN's not included in legend with subplots
# see https://github.com/pydata/pandas/issues/8390
self.assertEqual([x.get_text() for x in
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i+1:])
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
axes = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12)*0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with tm.assertRaises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz']*12, 'y': ['zzz']*12})
with tm.assertRaises(TypeError):
df.plot(yerr=df_err)
@slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12)*0.2, 'z': np.ones(12)*0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_timeseries(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
axes = _check_plot_works(tdf.plot, kind=kind, yerr=tdf_err, subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
err = np.random.rand(3, 2, 5)
data = np.random.randn(5, 3)
df = DataFrame(data)
ax = df.plot(yerr=err, xerr=err/2)
self.assertEqual(ax.lines[7].get_ydata()[0], data[0,1]-err[1,0,0])
self.assertEqual(ax.lines[8].get_ydata()[0], data[0,1]+err[1,1,0])
self.assertEqual(ax.lines[5].get_xdata()[0], -err[1,0,0]/2)
self.assertEqual(ax.lines[6].get_xdata()[0], err[1,1,0]/2)
with tm.assertRaises(ValueError):
df.plot(yerr=err.T)
tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
self.assertTrue(len(ax.tables) == 0)
plotting.table(ax, df.T)
self.assertTrue(len(ax.tables) == 1)
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y',
xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
            errs = [c.lines[1][0] for c in containers if getattr(c, has_err, False)]
self._check_colors(errs, linecolors=[expected] * len(errs))
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot(kind='scatter', x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot(kind='scatter', x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
def test_sharex_and_ax(self):
# https://github.com/pydata/pandas/issues/9737
        # using gridspec, the axes in fig.get_axes() are sorted differently than pandas expects
# them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a":[1,2,3,4,5,6], "b":[1,2,3,4,5,6]})
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
for ax in plt.gcf().get_axes():
for label in ax.get_xticklabels():
                self.assertEqual(label.get_visible(), ax.is_last_row(),
                                 "x ticklabel has wrong visibility")
            self.assertEqual(ax.xaxis.get_label().get_visible(), ax.is_last_row(),
                             "x label has wrong visibility")
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in plt.gcf().get_axes():
for label in ax.get_xticklabels():
self.assertTrue(label.get_visible(), "x ticklabel is invisible but shouldn't")
            self.assertTrue(ax.xaxis.get_label().get_visible(),
                            "x label is invisible but shouldn't be")
def test_sharey_and_ax(self):
# https://github.com/pydata/pandas/issues/9737
        # using gridspec, the axes in fig.get_axes() are sorted differently than pandas expects
# them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a":[1,2,3,4,5,6], "b":[1,2,3,4,5,6]})
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
for ax in plt.gcf().get_axes():
for label in ax.get_yticklabels():
                self.assertEqual(label.get_visible(), ax.is_first_col(),
                                 "y ticklabel has wrong visibility")
            self.assertEqual(ax.yaxis.get_label().get_visible(), ax.is_first_col(),
                             "y label has wrong visibility")
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
        # without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in plt.gcf().get_axes():
for label in ax.get_yticklabels():
self.assertTrue(label.get_visible(), "y ticklabel is invisible but shouldn't")
            self.assertTrue(ax.yaxis.get_label().get_visible(),
                            "y label is invisible but shouldn't be")
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
@slow
def test_boxplot(self):
grouped = self.hist_df.groupby(by='gender')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values()), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
grouped = df.groupby(level=1)
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values()), axes_num=10, layout=(4, 3))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
grouped = df.unstack(level=1).groupby(level=0, axis=1)
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values()), axes_num=3, layout=(2, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = tm.choice(['male', 'female'], size=n)
df = DataFrame({'height': height, 'weight': weight, 'gender': gender})
gb = df.groupby('gender')
res = gb.plot()
self.assertEqual(len(self.plt.get_fignums()), 2)
self.assertEqual(len(res), 2)
tm.close()
res = gb.boxplot(return_type='axes')
self.assertEqual(len(self.plt.get_fignums()), 1)
self.assertEqual(len(res), 2)
tm.close()
# now works with GH 5610 as gender is excluded
res = df.groupby('gender').hist()
tm.close()
def test_series_plot_color_kwargs(self):
# GH1890
ax = Series(np.arange(12) + 1).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
for i in range(ncolors):
ax = s.plot()
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
@slow
def test_grouped_hist(self):
df = DataFrame(randn(500, 2), columns=['A', 'B'])
df['C'] = np.random.randint(0, 4, 500)
df['D'] = ['X'] * 500
axes = plotting.grouped_hist(df.A, by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
axes = df.hist(by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
# group by a key with single value
axes = df.hist(by='D', rot=30)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self._check_ticks_props(axes, xrot=30)
tm.close()
# make sure kwargs to hist are handled
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = plotting.grouped_hist(df.A, by=df.C, normed=True,
cumulative=True, bins=4,
xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
# height of last bin (index 5) must be 1.0
for ax in axes.ravel():
height = ax.get_children()[5].get_height()
self.assertAlmostEqual(height, 1.0)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
axes = plotting.grouped_hist(df.A, by=df.C, log=True)
# scale of y must be 'log'
self._check_ax_scales(axes, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with tm.assertRaises(AttributeError):
plotting.grouped_hist(df.A, by=df.C, foo='bar')
with tm.assert_produces_warning(FutureWarning):
df.hist(by='C', figsize='default')
@slow
def test_grouped_hist2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender_int = tm.choice([0, 1], size=n)
df_int = DataFrame({'height': height, 'weight': weight,
'gender': gender_int})
gb = df_int.groupby('gender')
axes = gb.hist()
self.assertEqual(len(axes), 2)
self.assertEqual(len(self.plt.get_fignums()), 2)
tm.close()
@slow
def test_grouped_box_return_type(self):
df = self.hist_df
# old style: return_type=None
result = df.boxplot(by='gender')
self.assertIsInstance(result, np.ndarray)
self._check_box_return_type(result, None,
expected_keys=['height', 'weight', 'category'])
# now for groupby
with tm.assert_produces_warning(FutureWarning):
result = df.groupby('gender').boxplot()
self._check_box_return_type(result, 'dict', expected_keys=['Male', 'Female'])
columns2 = 'X B C D A G Y N Q O'.split()
df2 = DataFrame(random.randn(50, 10), columns=columns2)
categories2 = 'A B C D E F G H I J'.split()
df2['category'] = categories2 * 5
for t in ['dict', 'axes', 'both']:
returned = df.groupby('classroom').boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=['A', 'B', 'C'])
returned = df.boxplot(by='classroom', return_type=t)
self._check_box_return_type(returned, t,
expected_keys=['height', 'weight', 'category'])
returned = df2.groupby('category').boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=categories2)
returned = df2.boxplot(by='category', return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
@slow
def test_grouped_box_layout(self):
df = self.hist_df
self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],
by=df.gender, layout=(1, 1))
self.assertRaises(ValueError, df.boxplot, column=['height', 'weight', 'category'],
layout=(2, 1), return_type='dict')
self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],
by=df.gender, layout=(-1, -1))
box = _check_plot_works(df.groupby('gender').boxplot, column='height',
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
box = _check_plot_works(df.groupby('category').boxplot, column='height',
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
# GH 6769
box = _check_plot_works(df.groupby('classroom').boxplot,
column='height', return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
# GH 5897
axes = df.boxplot(column=['height', 'weight', 'category'], by='gender',
return_type='axes')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
for ax in [axes['height']]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes['weight'], axes['category']]:
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
box = _check_plot_works(df.groupby('category').boxplot, column='height',
layout=(3, 2), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
box = _check_plot_works(df.groupby('category').boxplot, column='height',
layout=(3, -1), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
layout=(4, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
layout=(-1, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], layout=(1, 4),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], layout=(1, -1),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
@slow
def test_grouped_box_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
# check warning to ignore sharex / sharey
# this check should be done in the first function which
# passes multiple axes to plot, hist or boxplot
# location should be changed if other test is added
# which has earlier alphabetical order
with tm.assert_produces_warning(UserWarning):
fig, axes = self.plt.subplots(2, 2)
df.groupby('category').boxplot(column='height', return_type='axes', ax=axes)
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
fig, axes = self.plt.subplots(2, 3)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
returned = df.boxplot(column=['height', 'weight', 'category'],
by='gender', return_type='axes', ax=axes[0])
returned = np.array(list(returned.values()))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assert_numpy_array_equal(returned, axes[0])
self.assertIs(returned[0].figure, fig)
# draw on second row
with warnings.catch_warnings():
warnings.simplefilter('ignore')
returned = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'],
return_type='axes', ax=axes[1])
returned = np.array(list(returned.values()))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assert_numpy_array_equal(returned, axes[1])
self.assertIs(returned[0].figure, fig)
with tm.assertRaises(ValueError):
fig, axes = self.plt.subplots(2, 3)
            # pass a different number of axes than required
axes = df.groupby('classroom').boxplot(ax=axes)
@slow
def test_grouped_hist_layout(self):
df = self.hist_df
self.assertRaises(ValueError, df.hist, column='weight', by=df.gender,
layout=(1, 1))
self.assertRaises(ValueError, df.hist, column='height', by=df.category,
layout=(1, 3))
self.assertRaises(ValueError, df.hist, column='height', by=df.category,
layout=(-1, -1))
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, -1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = df.hist(column='height', by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category, layout=(4, 2), figsize=(12, 8))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
tm.close()
# GH 6769
axes = _check_plot_works(df.hist, column='height', by='classroom', layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
# without column
axes = _check_plot_works(df.hist, by='classroom')
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.hist(by='gender', layout=(3, 5))
self._check_axes_shape(axes, axes_num=2, layout=(3, 5))
axes = df.hist(column=['height', 'weight', 'category'])
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
@slow
def test_grouped_hist_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
fig, axes = self.plt.subplots(2, 3)
returned = df.hist(column=['height', 'weight', 'category'], ax=axes[0])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assert_numpy_array_equal(returned, axes[0])
self.assertIs(returned[0].figure, fig)
returned = df.hist(by='classroom', ax=axes[1])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assert_numpy_array_equal(returned, axes[1])
self.assertIs(returned[0].figure, fig)
with tm.assertRaises(ValueError):
fig, axes = self.plt.subplots(2, 3)
            # pass a different number of axes than required
axes = df.hist(column='height', ax=axes)
@slow
def test_axis_share_x(self):
df = self.hist_df
# GH4089
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)
# share x
self.assertTrue(ax1._shared_x_axes.joined(ax1, ax2))
self.assertTrue(ax2._shared_x_axes.joined(ax1, ax2))
# don't share y
self.assertFalse(ax1._shared_y_axes.joined(ax1, ax2))
self.assertFalse(ax2._shared_y_axes.joined(ax1, ax2))
@slow
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)
# share y
self.assertTrue(ax1._shared_y_axes.joined(ax1, ax2))
self.assertTrue(ax2._shared_y_axes.joined(ax1, ax2))
# don't share x
self.assertFalse(ax1._shared_x_axes.joined(ax1, ax2))
self.assertFalse(ax2._shared_x_axes.joined(ax1, ax2))
@slow
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True,
sharey=True)
# share both x and y
self.assertTrue(ax1._shared_x_axes.joined(ax1, ax2))
self.assertTrue(ax2._shared_x_axes.joined(ax1, ax2))
self.assertTrue(ax1._shared_y_axes.joined(ax1, ax2))
self.assertTrue(ax2._shared_y_axes.joined(ax1, ax2))
def test_option_mpl_style(self):
set_option('display.mpl_style', 'default')
set_option('display.mpl_style', None)
set_option('display.mpl_style', False)
with tm.assertRaises(ValueError):
set_option('display.mpl_style', 'default2')
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with tm.assertRaises(ValueError):
df.plot(colormap='invalid_colormap')
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = tm.choice(['male', 'female'], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
#Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame({'def': [1,1,1,2,2,2,3,3,3],
'val': np.random.randn(9)},
index=[1.0,2.0,3.0,1.0,2.0,3.0,1.0,2.0,3.0])
df.groupby('def')['val'].plot()
tm.close()
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, np.ndarray):
for el in objs.flat:
assert isinstance(el, plt.Axes), ('one of \'objs\' is not a '
'matplotlib Axes instance, '
'type encountered {0!r}'
''.format(el.__class__.__name__))
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def _check_plot_works(f, *args, **kwargs):
import matplotlib.pyplot as plt
ret = None
try:
try:
fig = kwargs['figure']
except KeyError:
fig = plt.gcf()
plt.clf()
ax = kwargs.get('ax', fig.add_subplot(211))
ret = f(*args, **kwargs)
assert_is_valid_plot_return_object(ret)
try:
kwargs['ax'] = fig.add_subplot(212)
ret = f(*args, **kwargs)
except Exception:
pass
else:
assert_is_valid_plot_return_object(ret)
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
tm.close(fig)
return ret
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0,0])
ax_ll = plt.subplot(gs[1,0])
ax_tr = plt.subplot(gs[0,1])
ax_lr = plt.subplot(gs[1,1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
the-stack_106_28218 | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
options = Table('options', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=64)),
Column('properties', String(length=256)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['options'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['options'].drop()
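# These two hooks are normally driven by sqlalchemy-migrate style tooling rather
# than run directly; a sketch of the typical invocation (the script names are an
# assumption about the surrounding project, not part of this file):
#   python db_upgrade.py     # ends up calling upgrade(migrate_engine) to create 'options'
#   python db_downgrade.py   # ends up calling downgrade(migrate_engine) to drop it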
|
the-stack_106_28219 | nome = str(input('Nome: '))
idade = int(input('Idade: '))
peso = float(input('Peso: '))
altura = float(input('Altura: '))
n_sus = str(input('Número do SUS:'))
diagnostico = str(input('Diagnostico: '))
print(f'{nome},{idade},{peso},{altura},{n_sus},{diagnostico}')
'''954,7 kb
886,9 kb
https://repl.it/repls/DeliciousHastyIntegers#main.py''' |
the-stack_106_28220 | import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from . import cartopy_borders, cartopy_proj_albers
def map_pretty(ax, title=''):
state_borders, us_border = cartopy_borders()
ax.add_geometries(
state_borders,
facecolor='none',
edgecolor='k',
crs=cartopy_proj_albers(),
linewidth=0.3,
zorder=0,
)
ax.add_geometries(
us_border,
facecolor='none',
edgecolor='k',
crs=cartopy_proj_albers(),
linewidth=0.3,
zorder=0,
)
ax.axis('off')
ax.set_extent([-125, -70, 20, 50])
ax.text(0.77, 0.96, title, transform=ax.transAxes)
def add_colorbar(
fig,
to_plot=None,
x_location=1.08,
y_location=0.76,
height=0.12,
width=0.018,
vmin=None,
vmax=None,
cbar_label='',
cmap='viridis',
):
cax = fig.add_axes([x_location, y_location, width, height])
cax.text(
0.5,
-0.08,
vmin,
transform=cax.transAxes,
horizontalalignment='center',
verticalalignment='center',
)
cax.text(
0.5,
1.08,
vmax,
transform=cax.transAxes,
horizontalalignment='center',
verticalalignment='center',
)
cax.text(
1.8,
0.5,
cbar_label,
transform=cax.transAxes,
verticalalignment='center',
multialignment='center',
rotation=-90,
)
if to_plot is not None:
cbar = fig.colorbar(to_plot, cax=cax, orientation='vertical')
else:
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cbar = fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical'
)
cbar.outline.set_visible(False)
cbar.set_ticks([])
return cbar
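# Illustrative usage of add_colorbar (a sketch; `data` and the axis setup below are
# assumptions, not part of this module):
#
#   fig, ax = plt.subplots()
#   mappable = ax.pcolormesh(data, vmin=0, vmax=1, cmap='viridis')
#   add_colorbar(fig, to_plot=mappable, vmin=0, vmax=1,
#                cbar_label='Burn area\n(fraction/year)', cmap='viridis')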
def ts_pretty(ax, impact, ylims):
ax.set_xlim(1970, 2100)
ax.set_xlabel("")
ax.set_ylabel("")
ax.set_ylim(ylims)
ax.set_title("")
def multipanel_ts(results_dict, region_bboxes, fig_path):
gcms = [
("MRI-ESM2-0", (0, 0)),
("MIROC-ES2L", (1, 0)),
("MPI-ESM1-2-LR", (2, 0)),
("ACCESS-ESM1-5", (3, 0)),
("ACCESS-CM2", (4, 0)),
("CanESM5-CanOE", (5, 0)),
]
titles = {
"fire": "Burn area\n(fraction/year)",
"drought": "Drought-related\nmortality (%/year)",
"insects": "Insect-related\nmortality (%/year)",
}
ylims = {
"fire": (0, 0.05),
"drought": (0, 2.5),
"insects": (0, 1),
}
fig = plt.figure(figsize=(12, 10))
full_gridspec = gridspec.GridSpec(4, 4, wspace=0.2, hspace=0.2)
for col, region in enumerate(region_bboxes.keys()):
ax = fig.add_subplot(full_gridspec[0, col], projection=cartopy_proj_albers())
map_pretty(ax, title=region)
ax.add_patch(
mpatches.Rectangle(
xy=[region_bboxes[region]['x'].start, region_bboxes[region]['y'].start],
width=region_bboxes[region]['x'].stop - region_bboxes[region]['x'].start,
height=region_bboxes[region]['y'].stop - region_bboxes[region]['y'].start,
facecolor='grey',
alpha=0.5,
)
)
for impact_num, impact in enumerate(["fire", "drought", "insects"]):
impact_axes = []
for j, region in enumerate(region_bboxes.keys()):
ax = fig.add_subplot(full_gridspec[1 + impact_num, j])
results_dict[impact][region]["historical"].plot(ax=ax, color="k", zorder=60)
for scenario in ["ssp245", "ssp370", "ssp585"]:
impact_axes.append(ax)
plot_future_ts_traces(ax, results_dict[impact][region]["future"], scenario, gcms)
ts_pretty(ax, impact, ylims[impact])
if impact != 'insects':
ax.set_xticks([])
if len(impact_axes) > 3:
ax.set_yticks([])
else:
ax.set_ylabel(titles[impact])
for format_string in ["svg", "png"]:
plt.savefig(fig_path + format_string, format=format_string)
def plot_future_ts_traces(ax, ds, scenario, gcms):
scenario_colors = {
"ssp245": "#59A82F",
"ssp370": "#D8B525",
"ssp585": "#D83232",
}
scenario_colors_light = {
"ssp245": "#DEEED5",
"ssp370": "#F7F0D3",
"ssp585": "#F7D6D6",
}
ssp_rename = {"ssp245": "SSP2-4.5", "ssp370": "SSP3-7.0", "ssp585": "SSP5-8.5"}
for (gcm, location) in gcms:
ds.sel(gcm=gcm, scenario=scenario).plot(ax=ax, color=scenario_colors_light[scenario])
ds.sel(scenario=scenario).mean(dim="gcm").plot(
ax=ax, color=scenario_colors[scenario], label=ssp_rename[scenario], zorder=30
)
|
the-stack_106_28221 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "dsgn", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"dsgn._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="dsgn",
version="0.1",
author="ylchen",
url="https://github.com/chenyilun95/DSGN",
description="stereo-based 3D object detection in pytorch",
packages=find_packages(exclude=("configs", "tools", "preprocessing")),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
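# Typical build commands for this extension (assumptions about the usual
# setuptools / torch.utils.cpp_extension workflow, not taken from this repository):
#   python setup.py build_ext --inplace   # compiles the dsgn._C C++/CUDA extension
#   python setup.py develop               # editable install once the extension builds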
|
the-stack_106_28222 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import netius
async def compute(x, y):
print("Compute %s + %s ..." % (x, y))
await netius.sleep(1.0)
return x + y
async def print_sum(x, y):
result = await compute(x, y)
print("%s + %s = %s" % (x, y, result))
loop = netius.get_loop(_compat = True)
loop.run_until_complete(print_sum(1, 2))
loop.close()
|
the-stack_106_28223 | import re
import requests
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.contrib.admin.sites import site as admin_site
from django.db.models.fields.related import ManyToOneRel
from django.forms import fields, Media, ModelChoiceField
from django.forms.widgets import RadioSelect
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from django_select2.forms import HeavySelect2Widget
from cms.utils import get_current_site
from cms.models import Page
from entangled.forms import EntangledModelFormMixin, get_related_object
from filer.models.filemodels import File as FilerFileModel
from filer.fields.file import AdminFileWidget, FilerFileField
try:
from phonenumber_field.formfields import PhoneNumberField
except ImportError:
PhoneNumberField = None
def format_page_link(title, path):
html = format_html("{} ({})", mark_safe(title), path)
return html
class PageSelect2Widget(HeavySelect2Widget):
def __init__(self, *args, **kwargs):
kwargs.setdefault('data_view', 'admin:get_published_pagelist')
super().__init__(*args, **kwargs)
@property
def media(self):
parent_media = super().media
# append jquery.init.js to enforce select2.js into the global 'jQuery' namespace
js = list(parent_media._js) + ['admin/js/jquery.init.js']
return Media(css=parent_media._css, js=js)
def render(self, *args, **kwargs):
# replace self.choices by an empty list in order to prevent building the whole optgroup
try:
page = Page.objects.get(pk=kwargs['value'])
except (Page.DoesNotExist, ValueError, KeyError):
self.choices = []
else:
self.choices = [(kwargs['value'], str(page))]
return super().render(*args, **kwargs)
class LinkSearchField(ModelChoiceField):
widget = PageSelect2Widget()
def __init__(self, *args, **kwargs):
queryset = Page.objects.public()
try:
queryset = queryset.published().on_site(get_current_site())
except:
choices = [] # can happen if database is not ready yet
else:
# set a minimal set of choices, otherwise django-select2 builds them for every published page
choices = [(index, str(page)) for index, page in enumerate(queryset[:15])]
kwargs.setdefault('queryset', queryset.distinct())
super().__init__(*args, **kwargs)
self.choices = choices
class SectionChoiceField(fields.ChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('choices', [('', _("Page Root"))])
super().__init__(*args, **kwargs)
def valid_value(self, value):
"""
        The optgroup is adjusted dynamically according to the selected cms_page, so this always returns True
        and lets `LinkForm` validate the value.
"""
return True
class LinkForm(EntangledModelFormMixin):
LINK_TYPE_CHOICES = [
('cmspage', _("CMS Page")),
('download', _("Download File")),
('exturl', _("External URL")),
('email', _("Mail To")),
]
if PhoneNumberField:
LINK_TYPE_CHOICES.append(('phonenumber', _("Phone number")))
link_type = fields.ChoiceField(
label=_("Link"),
help_text=_("Type of link"),
)
cms_page = LinkSearchField(
required=False,
label='',
help_text=_("An internal link onto any CMS page of this site"),
)
section = SectionChoiceField(
required=False,
label='',
help_text=_("Page bookmark"),
)
download_file = ModelChoiceField(
label='',
queryset=FilerFileModel.objects.all(),
widget=AdminFileWidget(ManyToOneRel(FilerFileField, FilerFileModel, 'id'), admin_site),
required=False,
help_text=_("An internal link onto a file from filer"),
)
ext_url = fields.URLField(
required=False,
label=_("URL"),
help_text=_("Link onto external page"),
)
mail_to = fields.EmailField(
required=False,
label=_("Email"),
help_text=_("Open Email program with this address"),
)
if PhoneNumberField:
phone_number = PhoneNumberField(
required=False,
label=_("Phone Number"),
help_text=_("International phone number, ex. +1 212 555 2368."),
)
link_target = fields.ChoiceField(
choices=[
('', _("Same Window")),
('_blank', _("New Window")),
('_parent', _("Parent Window")),
('_top', _("Topmost Frame")),
],
label=_("Link Target"),
widget=RadioSelect,
required=False,
help_text=_("Open Link in other target."),
)
link_title = fields.CharField(
label=_("Title"),
required=False,
help_text=_("Link's Title"),
)
class Meta:
entangled_fields = {'glossary': ['link_type', 'cms_page', 'section', 'download_file', 'ext_url', 'mail_to',
'link_target', 'link_title']}
if PhoneNumberField:
entangled_fields['glossary'].append('phone_number')
def __init__(self, *args, **kwargs):
link_type_choices = []
if not getattr(self, 'require_link', True):
link_type_choices.append(('', _("No Link")))
self.declared_fields['link_type'].required = False
link_type_choices.extend(self.LINK_TYPE_CHOICES)
self.declared_fields['link_type'].choices = link_type_choices
self.declared_fields['link_type'].initial = link_type_choices[0][0]
instance = kwargs.get('instance')
if instance and instance.glossary.get('link_type') == 'cmspage':
self._preset_section(instance)
super().__init__(*args, **kwargs)
def _preset_section(self, instance):
"""
        Field ``cms_page`` may refer to any CMS page, which itself may contain bookmarks. This method
creates the list of bookmarks.
"""
self.base_fields['section'].choices = self.base_fields['section'].choices[:1]
try:
cascade_page = get_related_object(instance.glossary, 'cms_page').cascadepage
for key, val in cascade_page.glossary.get('element_ids', {}).items():
self.base_fields['section'].choices.append((key, val))
except (AttributeError, ObjectDoesNotExist):
pass
def clean(self):
cleaned_data = super().clean()
link_type = cleaned_data.get('link_type')
error = None
if link_type == 'cmspage':
if not cleaned_data.get('cms_page'):
error = ValidationError(_("CMS page to link to is missing."))
self.add_error('cms_page', error)
elif link_type == 'download':
if not cleaned_data.get('download_file'):
error = ValidationError(_("File for download is missing."))
self.add_error('download_file', error)
elif link_type == 'exturl':
ext_url = cleaned_data.get('ext_url')
if ext_url:
try:
response = requests.head(ext_url, allow_redirects=True)
if response.status_code != 200:
error = ValidationError(_("No external page found on {url}.").format(url=ext_url))
except Exception as exc:
error = ValidationError(_("Failed to connect to {url}.").format(url=ext_url))
else:
error = ValidationError(_("No valid URL provided."))
if error:
self.add_error('ext_url', error)
elif link_type == 'email':
mail_to = cleaned_data.get('mail_to')
if mail_to:
if not re.match(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)', mail_to):
msg = _("'{email}' is not a valid email address.")
error = ValidationError(msg.format(email=mail_to))
else:
error = ValidationError(_("No email address provided."))
if error:
self.add_error('mail_to', error)
elif link_type == 'phonenumber':
phone_number = cleaned_data.get('phone_number')
if phone_number:
cleaned_data['phone_number'] = str(phone_number)
if error:
raise error
return cleaned_data
@classmethod
def unset_required_for(cls, sharable_fields):
"""
        Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form are only
        required if they are declared in `sharable_fields`. Otherwise they are deactivated.
"""
if 'link_content' in cls.base_fields and 'link_content' not in sharable_fields:
cls.base_fields['link_content'].required = False
if 'link_type' in cls.base_fields and 'link' not in sharable_fields:
cls.base_fields['link_type'].required = False
class TextLinkFormMixin(EntangledModelFormMixin):
link_content = fields.CharField(
label=_("Link Content"),
widget=fields.TextInput(attrs={'id': 'id_name'}), # replace auto-generated id so that CKEditor automatically transfers the text into this input field
help_text=_("Content of Link"),
)
class Meta:
entangled_fields = {'glossary': ['link_content']}
|
the-stack_106_28224 | import pytest
from flask import Flask
from flask_discord_interactions import DiscordInteractions, SlashCommand, Response
def test_slash_command(discord, client):
with pytest.deprecated_call():
command = SlashCommand(lambda ctx: "ping", "ping", "No description", [], [])
# make sure the object still works
assert command.name == "ping"
def test_response():
with pytest.deprecated_call():
response = Response("Hello this is my response")
assert response.content == "Hello this is my response"
def test_add_slash_command(discord):
with pytest.deprecated_call():
discord.add_slash_command(
lambda ctx: "ping", "ping", "test test test", [], []
)
assert discord.discord_commands["ping"].description == "test test test"
def test_update_slash_commands():
app = Flask(__name__)
app.config["DONT_VALIDATE_SIGNATURE"] = True
app.config["DONT_REGISTER_WITH_DISCORD"] = True
discord = DiscordInteractions(app)
with pytest.deprecated_call():
discord.update_slash_commands()
|
the-stack_106_28231 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonate.fields
import test_app.models
class Migration(migrations.Migration):
dependencies = [
('test_app', '0002_mymodelwithjsonatefield'),
]
operations = [
migrations.CreateModel(
name='WithJsonateFieldExpectingList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('some_name', models.CharField(max_length=255)),
('some_json_data', jsonate.fields.JsonateField(default=list, validators=[test_app.models.validate_list])),
],
),
]
|
the-stack_106_28233 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from os import symlink
class Bridger(MakefilePackage):
"""Bridger : An Efficient De novo Transcriptome Assembler For
RNA-Seq Data"""
homepage = "https://sourceforge.net/projects/rnaseqassembly/"
url = "https://downloads.sourceforge.net/project/rnaseqassembly/Bridger_r2014-12-01.tar.gz"
version('2014-12-01', sha256='8fbec8603ea8ad2162cbd0c658e4e0a4af6453bdb53310b4b7e0d112e40b5737')
depends_on('boost')
depends_on('perl', type='run')
def install(self, spec, prefix):
# bridger depends very much on perl scripts/etc in the source tree
install_path = join_path(prefix, 'usr/local/bridger')
mkdirp(install_path)
install_tree('.', install_path)
# symlink the init script to /bin
mkdirp(prefix.bin)
symlink(join_path(install_path, 'Bridger.pl'),
join_path(prefix.bin, 'Bridger.pl'))
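# Example workflow once this package definition is in a Spack repo (an assumption
# about typical Spack usage, not part of the package itself):
#   spack install bridger
#   spack load bridger      # Bridger.pl is symlinked into <prefix>/bin by install()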
|
the-stack_106_28235 | # -*- coding: utf-8 -*-
"""
Plot point-spread functions (PSFs) and cross-talk functions (CTFs)
==================================================================
Visualise PSF and CTF at one vertex for sLORETA.
"""
# Authors: Olaf Hauk <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD-3-Clause
# %%
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_resolution_matrix, get_cross_talk,
get_point_spread)
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path / 'subjects'
meg_path = data_path / 'MEG' / 'sample'
fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = meg_path / 'sample_audvis-cov.fif'
fname_evo = meg_path / 'sample_audvis-ave.fif'
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
mne.convert_forward_solution(forward, surf_ori=True,
force_fixed=True, copy=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution
# free source orientation
inverse_operator = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'MNE' # can be 'MNE' or 'sLORETA'
# compute resolution matrix for sLORETA
rm_lor = make_inverse_resolution_matrix(forward, inverse_operator,
method='sLORETA', lambda2=lambda2)
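# (Note) The resolution matrix computed above is essentially R = M @ G, where M is
# the (sLORETA) inverse operator and G the forward leadfield: its columns are the
# point-spread functions and its rows the cross-talk functions that
# get_point_spread / get_cross_talk extract below.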
# get PSF and CTF for sLORETA at one vertex
sources = [1000]
stc_psf = get_point_spread(rm_lor, forward['src'], sources, norm=True)
stc_ctf = get_cross_talk(rm_lor, forward['src'], sources, norm=True)
del rm_lor
##############################################################################
# Visualize
# ---------
# PSF:
# Which vertex corresponds to selected source
vertno_lh = forward['src'][0]['vertno']
verttrue = [vertno_lh[sources[0]]] # just one vertex
# find vertices with maxima in PSF and CTF
vert_max_psf = vertno_lh[stc_psf.data.argmax()]
vert_max_ctf = vertno_lh[stc_ctf.data.argmax()]
brain_psf = stc_psf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir)
brain_psf.show_view('ventral')
brain_psf.add_text(0.1, 0.9, 'sLORETA PSF', 'title', font_size=16)
# True source location for PSF
brain_psf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of PSF
brain_psf.add_foci(vert_max_psf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
# %%
# CTF:
brain_ctf = stc_ctf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir)
brain_ctf.add_text(0.1, 0.9, 'sLORETA CTF', 'title', font_size=16)
brain_ctf.show_view('ventral')
brain_ctf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of CTF
brain_ctf.add_foci(vert_max_ctf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
# %%
# The green spheres indicate the true source location, and the black
# spheres the maximum of the distribution.
|
the-stack_106_28236 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# test_maxsum.py
# algorithms
#
# Created by Haibao Tang on 06/19/21
# Copyright © 2021 Haibao Tang. All rights reserved.
#
import pytest
@pytest.mark.parametrize(
"input,output",
[
([4, 4, 9, -5, -6, -1, 5, -6, -8, 9], (17, 0, 2)),
([8, -10, 10, -9, -6, 9, -7, -4, -10, -8], (10, 2, 2)),
([10, 1, -10, -8, 6, 10, -10, 6, -3, 10], (19, 4, 9)),
],
)
def test_max_sum(input, output):
from jcvi.algorithms.maxsum import max_sum
assert max_sum(input) == output
|
the-stack_106_28237 | """
This file offers the methods to automatically retrieve the graph Paraliobacillus sp. PM-2.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ParaliobacillusSpPm2(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Paraliobacillus sp. PM-2 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Paraliobacillus sp. PM-2 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ParaliobacillusSpPm2",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
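# Minimal usage sketch (the calls on the returned Graph object are assumptions and
# may differ between ensmallen versions):
#
#   graph = ParaliobacillusSpPm2(directed=False, version="links.v11.5")
#   print(graph)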
|
the-stack_106_28238 | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from numba import jit,double,int64
@jit(double(double,double))
def cos2phi(qc,qt):
e1=3.94
e2=4.84
k1t=-0.105
k2t=0.149
lamda=0.262
Delta = 0.5*(e2+k2t*qt-e1-k1t*qt)
return Delta / np.sqrt( Delta**2. + lamda**2.*qc**2. )
@jit(double(double,double))
def sin2phi(qc,qt):
e1=3.94
e2=4.84
k1t=-0.105
k2t=0.149
lamda=0.262
Delta = 0.5*(e2+k2t*qt-e1-k1t*qt)
return lamda*qc / np.sqrt( Delta**2. + lamda**2.*qc**2. )
@jit()
def make_kmat(nc,nt,alpha1,alpha2,qc,qt,gam):
print(nc,nt)
Kmat = np.zeros((nc*nt,nc*nt))
nmax = nc*nt
for i in range(nc):
for k in range(nt):
ind1 = i*nt + k
for ip in range(nc):
for kp in range(nt):
ind2 = ip*nt + kp
Kmat[ind1,ind2] = np.exp((-alpha1*(qc[i]-qc[ip])**2.) + (-alpha2*2.*(np.sin(0.5*(qt[k] - qt[kp])))**2.))
if ind1 == ind2:
Kmat[ind1,ind1] += gam**2.
return Kmat
@jit()
def make_vkrr(nc,nt,ncd,ntd,qc,qt,qcd,qtd,alpha1,alpha2,w1,w2,w12):
vkrr1 = np.zeros((ncd,ntd))
vkrr2 = np.zeros((ncd,ntd))
vkrr12 = np.zeros((ncd,ntd))
for i in range(ncd):
for j in range(ntd):
for ip in range(nc):
for jp in range(nt):
k = ip*nt + jp
vkrr1[i,j] += w1[k]*np.exp((-alpha1*(qcd[i]-qc[ip])**2.) + (-alpha2*2.*(np.sin(0.5*(qtd[j] - qt[jp])))**2.))
vkrr2[i,j] += w2[k]*np.exp((-alpha1*(qcd[i]-qc[ip])**2.) + (-alpha2*2.*(np.sin(0.5*(qtd[j] - qt[jp])))**2.))
vkrr12[i,j] += w12[k]*np.exp((-alpha1*(qcd[i]-qc[ip])**2.) + (-alpha2*2.*(np.sin(0.5*(qtd[j] - qt[jp])))**2.))
return vkrr1,vkrr2,vkrr12
wc = 0.19
kappa0 = 0.0
kappa1 = 0.095
minv = 1.43e-3
E0 = 0.0
E1 = 2.00
W0 = 2.3
W1 = 1.50
lamda = 0.19
# parameters for kernel matrix
alpha1 = 0.1
alpha2 = 0.1
gam = 0.0001
# grid for potential
qc = np.arange(-6.,6.,0.5)
nc = len(qc)
qt = np.arange(-0.5*np.pi,0.5*3*np.pi,0.1*np.pi)
nt = len(qt)
# diabatic potentials
v1 = np.zeros((nc,nt))
v2 = np.zeros((nc,nt))
v12 = np.zeros((nc,nt))
for i in range(nc):
for j in range(nt):
v1[i,j] = E0 + 0.5*W0 + 0.5*wc*qc[i]**2. - 0.5*W0*np.cos(qt[j])
v2[i,j] = E1 - 0.5*W1 + 0.5*wc*qc[i]**2. + kappa1*qc[i] + 0.5*W1*np.cos(qt[j])
v12[i,j] = lamda*qc[i]
print('making kmat')
Kmat = make_kmat(nc,nt,alpha1,alpha2,qc,qt,gam)
print('inverting kmat')
#Kmat = make_kmat(nc,nt,alpha,qc,qt,gam)
Kinv = np.linalg.inv(Kmat)
w1 = np.dot(Kinv,v1.flatten())
w2 = np.dot(Kinv,v2.flatten())
w12 = np.dot(Kinv,v12.flatten())
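# The three weight vectors above are the closed-form kernel ridge regression fit
# of the diabatic surfaces; a brief sketch of what is being solved (our notation):
#   w = (K + gam**2 * I)^(-1) v,   V_KRR(q) = sum_k w_k * k(q, q_k)
# with the mixed kernel used in make_kmat,
#   k(q, q') = exp(-alpha1*(qc - qc')**2 - 2*alpha2*sin((qt - qt')/2)**2),
# i.e. Gaussian in the coupling coordinate and periodic in the torsion.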
print('plotting diabatic states')
# grid for potential
qcd = np.arange(-6.,6.,0.01)
ncd = len(qcd)
qtd = np.arange(-0.5*np.pi,0.5*3*np.pi,0.01)
ntd = len(qtd)
# diabatic potentials
v1 = np.zeros((ncd,ntd))
v2 = np.zeros((ncd,ntd))
v12 = np.zeros((ncd,ntd))
for i in range(ncd):
for j in range(ntd):
v1[i,j] = E0 + 0.5*W0 + 0.5*wc*qcd[i]**2. - 0.5*W0*np.cos(qtd[j])
v2[i,j] = E1 - 0.5*W1 + 0.5*wc*qcd[i]**2. + kappa1*qcd[i] + 0.5*W1*np.cos(qtd[j])
v12[i,j] = lamda*qcd[i]
vkrr1,vkrr2,vkrr12 = make_vkrr(nc,nt,ncd,ntd,qc,qt,qcd,qtd,alpha1,alpha2,w1,w2,w12)
QC,QT = np.meshgrid(qtd,qcd)
# diabatic
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(QC,QT,v1,alpha=0.5)
ax.plot_surface(QC,QT,v2,alpha=0.5)
ax.set_title('diabatic')
ax.set_xlabel(r'$q_c$')
ax.set_ylabel(r'$q_t$')
plt.tight_layout()
#plt.savefig('og_db_pes.png')
plt.show()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(QC,QT,vkrr1,alpha=0.5)
ax.plot_surface(QC,QT,vkrr2,alpha=0.5)
ax.set_title('diabatic krr')
ax.set_xlabel(r'$q_c$')
ax.set_ylabel(r'$q_t$')
plt.tight_layout()
#plt.savefig('krr_db_pes.png')
plt.show()
plt.contourf(QC,QT,vkrr1-v1)
plt.xlabel(r'$q_c$')
plt.ylabel(r'$q_t$')
plt.tight_layout()
#plt.savefig('krr_db_pes.png')
plt.colorbar()
plt.show()
plt.contourf(QC,QT,vkrr2-v2)
plt.xlabel(r'$q_c$')
plt.ylabel(r'$q_t$')
plt.tight_layout()
plt.colorbar()
#plt.savefig('krr_db_pes.png')
plt.show()
|
the-stack_106_28240 | from aws_cdk import (
core,
aws_iam,
)
class KnowledgeAnalyzerIAMStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.PREFIX = id
## **************** Create HealthLake Knowledge Analyzer Service Role ****************
self.service_role = aws_iam.Role(
self, f'{self.PREFIX}-ServiceRole',
assumed_by = aws_iam.CompositePrincipal(
aws_iam.ServicePrincipal('sns.amazonaws.com'),
aws_iam.ServicePrincipal('sqs.amazonaws.com'),
aws_iam.ServicePrincipal('lambda.amazonaws.com'),
aws_iam.ServicePrincipal('rds.amazonaws.com'),
aws_iam.ServicePrincipal('healthlake.amazonaws.com'),
aws_iam.ServicePrincipal('ec2.amazonaws.com'),
aws_iam.ServicePrincipal('kendra.amazonaws.com'),
aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
),
role_name = f"{self.PREFIX}-ServiceRole",
)
self.updateServiceRolePermissions()
def updateServiceRolePermissions(self):
resource_prefix = "HEALTHLAKE-KNOWLEDGE-ANALYZER"
## **************** Service Permissions ****************
self.service_role.add_to_policy(aws_iam.PolicyStatement(
effect = aws_iam.Effect.ALLOW,
resources = [
f"arn:aws:sqs:us-*:{self.account}:{resource_prefix}*",
f"arn:aws:sns:us-*:{self.account}:{resource_prefix}*",
f"arn:aws:logs:us-*:{self.account}:*",
f"arn:aws:neptune-db:us-*:{self.account}:*",
f"arn:aws:healthlake:us-*:{self.account}:*",
f"arn:aws:ec2:us-*:{self.account}:*",
f"arn:aws:sagemaker:us-*:{self.account}:*",
"*"
],
actions = [
"sqs:*",
"sns:*",
"logs:*",
"healthlake:*",
"iam:PassRole",
"s3:*",
"rds:*",
"neptune-db:*",
"ec2:*",
"sagemaker:*",
],
conditions = [],
))
# Healthlake
self.app_instance_role = aws_iam.Role(
self,
"AmazonHealthLake-Export-us-east-1-HealthKnoMaDataAccessRole",
role_name=f'AmazonHealthLake-Export-us-east-1-HealthKnoMaDataAccessRole',
assumed_by=aws_iam.ServicePrincipal("healthlake.amazonaws.com")
)
roleStmt1=aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
resources=["arn:aws:s3:::*"],
actions=["s3:PutObject"]
)
roleStmt2=aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
resources=["arn:aws:s3:::*"],
actions=["s3:ListBucket", "s3:GetBucketPublicAccessBlock", "s3:GetEncryptionConfiguration"]
)
self.app_instance_role.add_to_policy( roleStmt1 )
self.app_instance_role.add_to_policy( roleStmt2 )
# self.app_instance_role = aws_iam.CfnRole(
# self,
# "AmazonHealthLake-Export-us-east-1-HealthDataAccessRole",
# role_name=f'AmazonHealthLake-Export-us-east-1-HealthDataAccessRole',
# assume_role_policy_document=aws_iam.PolicyDocument(
# statements=[
# aws_iam.PolicyStatement(
# effect=aws_iam.Effect.ALLOW,
# actions=[ "sts:AssumeRole" ],
# principals=[ aws_iam.ServicePrincipal("healthlake.amazonaws.com") ]
# )
# ]
# ),
# policies=[
# aws_iam.CfnRole.PolicyProperty(
# policy_document=aws_iam.PolicyDocument(
# statements=[
# aws_iam.PolicyStatement(
# effect=aws_iam.Effect.ALLOW,
# actions=[
# "s3:PutObject"
# ],
# resources=["*"]
# )
# ]
# ),
# policy_name="HelathlakeAllowS3PutObject"
# ),
# aws_iam.CfnRole.PolicyProperty(
# policy_document=aws_iam.PolicyDocument(
# statements=[
# aws_iam.PolicyStatement(
# effect=aws_iam.Effect.ALLOW,
# actions=[
# "s3:ListBucket",
# "s3:GetBucketPublicAccessBlock",
# "s3:GetEncryptionConfiguration"
# ],
# resources=[ "*" ]
# )
# ]
# ),
# policy_name="HelathlakeAllowS3OListGetBucketAndEncryption"
# ),
# ],
# ) |
the-stack_106_28242 | from nltk.stem.isri import ISRIStemmer
from nltk.corpus import stopwords
from nltk.tokenize import WordPunctTokenizer
import pickle
import argparse
tokenizer = WordPunctTokenizer()
stemmer = ISRIStemmer()
stopwords = set(stopwords.words('arabic'))
SYMBOLS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\"')
print(stopwords)
print(SYMBOLS)
def clean_string(doc):
doc_tokens = tokenizer.tokenize(doc)
cleaned_tokens = []
for token in doc_tokens:
if token in stopwords or token in SYMBOLS:
continue
cleaned_tokens.append(stemmer.stem(token))
return " ".join(cleaned_tokens)
def stem_all_docs(docs):
cleaned_docs = []
for (i, doc) in enumerate(docs):
cleaned_docs.append(clean_string(doc))
if (i % 40000) == 0:
print("Finished {:.2f}".format(100.00 * i / len(docs)))
print("Finished Cleaning")
return cleaned_docs
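# Minimal usage sketch (hypothetical inputs; real documents would be Arabic text):
#   cleaned = stem_all_docs(["first document text", "second document text"])
#   assert len(cleaned) == 2  # one stemmed, stopword-free string per input document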
def clean_wiki(wiki_path, type):
wiki_data = pickle.load(open(wiki_path, "rb"))
docs = []
if type == 'para':
print("Extracting paragraphs!")
else:
print("Extracting articles!")
for art, pars in wiki_data.items():
if type == 'para':
for par in pars:
docs.append(par)
else:
docs.append(" ".join(pars))
pickle.dump(docs, open("arwiki_type_{}.p".format(type), "wb"))
pickle.dump(stem_all_docs(docs), open("arwiki_cleaned_type_{}.p".format(type), "wb"))
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--wiki-path', help='Path of arwiki.p', default="arwiki.p")
parser.add_argument('-t', '--type', help='Paragraph/Article', default="para")
if __name__ == "__main__":
args = parser.parse_args()
clean_wiki(args.wiki_path, args.type)
|
the-stack_106_28249 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Generates molecules that satisfy two targets.
Used a single Q-function as policy
Target1: SAS
Target2: QED
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Contrib import SA_Score
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('target_sas', 1, 'The target SAS of the molecule.')
flags.DEFINE_float('target_qed', 0.5, 'The target QED of the molecule.')
flags.DEFINE_boolean('use_multiply', True, 'mul')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
"""Defines the subclass of generating a molecule with a specific reward.
The reward is defined as a 1-D vector with 2 entries: similarity and QED
reward = (similarity_score, qed_score)
"""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
return -abs(sas - FLAGS.target_sas), -abs(qed_value - FLAGS.target_qed)
def soft_cst(v, l, r):
if l <= v <= r:
return 1
return -min(abs(l - v), abs(r - v))
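# soft_cst is a soft-constraint helper: it returns 1 when v lies inside [l, r]
# and the negative distance to the nearest bound otherwise, e.g.
#   soft_cst(2.0, 1.0, 3.0) == 1 and soft_cst(5.0, 1.0, 3.0) == -2.0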
class Molecule(molecules_mdp.Molecule):
"""SAS and QED reward molecules."""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
c1 = -abs(sas - FLAGS.target_sas)
c2 = -abs(qed_value - FLAGS.target_qed)
if FLAGS.use_multiply:
if c1 < 0 and c2 < 0:
reward = -c1 * c2
else:
reward = c1 * c2
else:
reward = (c1 + c2)
return reward * FLAGS.gamma**(self.max_steps - self._counter)
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
hparams.add_hparam('target_qed', FLAGS.target_qed)
hparams.add_hparam('target_sas', FLAGS.target_sas)
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12',
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
|
the-stack_106_28251 | import os
import cv2
import numpy as np
from keras.models import load_model
from keras.callbacks import Callback, ModelCheckpoint
import sklearn
from sklearn.model_selection import train_test_split
import csv
samples = []
data = 'data2/driving_log.csv'
print (data)
#load the lines in driving log file
with open(data) as csvfile:
reader = csv.reader(csvfile, skipinitialspace=True)
for line in reader:
samples.append(line)
#Delete the header
del samples[0]
#Split dataset into train and validation samples
train_samples, validation_samples = train_test_split(samples, test_size=0.4)
import math
from math import ceil
import random
from random import shuffle
# Set our batch size
batch_size=8
# Set our train and validation steps per epoch (60% train / 40% validation,
# matching the test_size=0.4 split above).
num_train_steps = ceil(192.6/(batch_size))
num_validation_steps = ceil(128.4/(batch_size))
#Generator function to yield train and validation samples
def generator(samples, train, batch_size=32):
global num_train_steps
global num_validation_steps
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
left_images = []
left_angles = []
right_images = []
right_angles = []
for batch_sample in batch_samples:
name = 'data/'+ batch_sample[0]
#Append the image and angle
center_image = cv2.imread(name)
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
#Append the flipped image and inverse of angle
images.append(np.fliplr(center_image))
angles.append(center_angle * -1.0)
name2 = 'data/'+ batch_sample[1]
left_image = cv2.imread(name2)
left_angle = float(batch_sample[3]) + 0.2
#Append the left image and offsetted angle
left_images.append(left_image)
left_angles.append(left_angle)
#Include flipped left images.
left_images.append(np.fliplr(left_image))
left_angles.append(left_angle * -1.0)
name3 = 'data/'+ batch_sample[2]
right_image = cv2.imread(name3)
right_angle = float(batch_sample[3]) - 0.2
#Append the right image and offsetted angle
right_images.append(right_image)
right_angles.append(right_angle)
#Include flipped right images.
right_images.append(np.fliplr(right_image))
right_angles.append(right_angle * -1.0)
if len(images) != 0:
X_train = np.array(images)
y_train = np.array(angles)
if len(left_images) != 0:
X_train_left = np.array(left_images)
y_train_left = np.array(left_angles)
X_train = np.concatenate((X_train, X_train_left), axis=0)
y_train = np.concatenate((y_train, y_train_left), axis=0)
if len(right_images) != 0:
X_train_right = np.array(right_images)
y_train_right = np.array(right_angles)
X_train = np.concatenate((X_train, X_train_right), axis=0)
y_train = np.concatenate((y_train, y_train_right), axis=0)
yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, train=True, batch_size=batch_size)
validation_generator = generator(validation_samples, train=False, batch_size=batch_size)
#Load the model from h5 file
model = load_model('model.h5')
#To save the model after every epoch
checkpoint = ModelCheckpoint('model.h5')
#Fit the model for 20 epochs.
model.fit_generator(train_generator, steps_per_epoch= num_train_steps, validation_data=validation_generator, validation_steps= num_validation_steps, epochs=20, verbose=1, callbacks=[checkpoint])
#Save the model finally.
model.save('model.h5') |
the-stack_106_28253 | import os
import re
from jinja2 import Environment, FileSystemLoader
def extract_version_parts(git_response):
regex = r"v(\d)\.(\d)\.(\d)(?:-(\d+)-([a-z0-9]+)(?:-([a-z0-9]+))?)?"
matches = re.finditer(regex, git_response, re.MULTILINE)
groups = list(matches)[0].groups()
    # groups = (major, minor, patch, commits since tag, short sha, dirty suffix);
    # optional groups that did not match come back as None.
    commits_since_tag = groups[3] if groups[3] else '0'
    commit_sha = groups[4]

    four_part_version = list(groups[0:3]) + [commits_since_tag]

    version_info = {
        'four_part_version': four_part_version,
        'is_dirty': groups[5] is not None
    }
return version_info
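# Expected parse for a typical `git describe` string (sha below is illustrative only):
#   extract_version_parts("v1.5.1-4-gc25ef16-dirty")
#   -> {'four_part_version': ['1', '5', '1', '4'], 'is_dirty': True}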
# The full version, including alpha/beta/rc tags.
release = os.popen('git describe --tags --dirty').read().strip()
print(release) # Returns something like v1.5.1-4-gc25ef16-dirty
release_parts = release.split('-')
basic_version = release_parts[0]
commits_since_tag = release_parts[1] if len(release_parts) > 1 else None
sha = release_parts[2] if len(release_parts) > 2 else None
dirty_flag = release_parts[3] if len(release_parts) > 3 else None
# Write the version used to display version info in the web gui and logs.
with open(os.path.join("../EDScoutWebUI", "version.py"), "w") as f:
f.write(f'release = "{release}"\n')
f.write(f'version = "{basic_version}"\n')
# record the version more simply here to aid the packaging process
with open("version.txt", "w") as f:
f.write(f'{release}')
env = Environment(
loader=FileSystemLoader('.'),
)
template = env.get_template('version_template.txt')
version_parts = extract_version_parts(release)
csv_version = ', '.join(version_parts['four_part_version']) # Something like 1,5,1,0
short_version = '.'.join(version_parts['four_part_version'][0:3]) # Something like 1.5.1
long_version = release # Something like v1.5.1-4-gc25ef16-dirty
rendered_version_file = template.render(csv_version=csv_version, short_version=short_version, long_version=long_version)
# print(rendered_version_file)
with open("version_for_installer.txt", "w") as f:
    f.write(rendered_version_file)
|
the-stack_106_28254 | import importlib
import os
import sys
import jinja2
from flask import Flask
from flask import send_from_directory
from flask_admin import Admin
from flask_admin.menu import MenuLink
from flask_login import current_user
from shopyo.api.file import trycopy
from shopyo.config import app_config
from shopyo.init import csrf
from shopyo.init import db
from shopyo.init import login_manager
from shopyo.init import ma
from shopyo.init import mail
from shopyo.init import migrate
from shopyo.init import modules_path
from shopyo.modules.box__default.settings.helpers import get_setting
from shopyo.modules.box__default.settings.models import Settings
from shopyo.shopyo_admin import DefaultModelView
from shopyo.shopyo_admin import MyAdminIndexView
base_path = os.path.dirname(os.path.abspath(__file__))
def create_app(config_name="development"):
global_entities = {}
app = Flask(__name__, instance_relative_config=True)
load_config_from_obj(app, config_name)
load_config_from_instance(app, config_name)
create_config_json()
load_extensions(app)
setup_flask_admin(app)
register_devstatic(app)
load_blueprints(app, global_entities)
setup_theme_paths(app)
inject_global_vars(app, global_entities)
return app
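# Minimal usage sketch (assumes the project's stock config files and the
# "development" config key are present):
#   app = create_app("development")
#   app.run(debug=True)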
def load_config_from_obj(app, config_name):
try:
configuration = app_config[config_name]
except KeyError as e:
print(
f"[ ] Invalid config name {e}. Available configurations are: "
f"{list(app_config.keys())}\n"
)
sys.exit(1)
app.config.from_object(configuration)
def load_config_from_instance(app, config_name):
if config_name != "testing":
# load the instance config, if it exists, when not testing
app.config.from_pyfile("config.py", silent=True)
# create empty instance folder and empty config if not present
try:
os.makedirs(app.instance_path)
with open(os.path.join(app.instance_path, "config.py"), "a"):
pass
except OSError:
pass
def create_config_json():
if not os.path.exists("config.json"):
trycopy("config_demo.json", "config.json")
def load_extensions(app):
migrate.init_app(app, db)
db.init_app(app)
ma.init_app(app)
mail.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
def setup_flask_admin(app):
admin = Admin(
app,
name="My App",
template_mode="bootstrap4",
index_view=MyAdminIndexView(),
)
admin.add_view(DefaultModelView(Settings, db.session))
admin.add_link(MenuLink(name="Logout", category="", url="/auth/logout?next=/admin"))
def register_devstatic(app):
@app.route("/devstatic/<path:boxormodule>/f/<path:filename>")
def devstatic(boxormodule, filename):
if app.config["DEBUG"]:
module_static = os.path.join(modules_path, boxormodule, "static")
return send_from_directory(module_static, filename=filename)
def load_blueprints(app, global_entities):
for folder in os.listdir(os.path.join(base_path, "modules")):
if folder.startswith("__"): # ignore __pycache__
continue
if folder.startswith("box__"):
# boxes
for sub_folder in os.listdir(os.path.join(base_path, "modules", folder)):
if sub_folder.startswith("__"): # ignore __pycache__
continue
elif sub_folder.endswith(".json"): # box_info.json
continue
try:
sys_mod = importlib.import_module(
f"shopyo.modules.{folder}.{sub_folder}.view"
)
app.register_blueprint(getattr(sys_mod, f"{sub_folder}_blueprint"))
except AttributeError:
pass
try:
mod_global = importlib.import_module(
f"shopyo.modules.{folder}.{sub_folder}.global"
)
global_entities.update(mod_global.available_everywhere)
except ImportError:
pass
else:
# apps
try:
mod = importlib.import_module(f"shopyo.modules.{folder}.view")
app.register_blueprint(getattr(mod, f"{folder}_blueprint"))
except AttributeError:
# print("[ ] Blueprint skipped:", e)
pass
try:
mod_global = importlib.import_module(f"shopyo.modules.{folder}.global")
global_entities.update(mod_global.available_everywhere)
except ImportError:
# print(f"[ ] {e}")
pass
def setup_theme_paths(app):
with app.app_context():
front_theme_dir = os.path.join(
app.config["BASE_DIR"], "static", "themes", "front"
)
back_theme_dir = os.path.join(
app.config["BASE_DIR"], "static", "themes", "back"
)
my_loader = jinja2.ChoiceLoader(
[
app.jinja_loader,
jinja2.FileSystemLoader([front_theme_dir, back_theme_dir]),
]
)
app.jinja_loader = my_loader
def inject_global_vars(app, global_entities):
@app.context_processor
def inject_global_vars():
APP_NAME = get_setting("APP_NAME")
base_context = {
"APP_NAME": APP_NAME,
"len": len,
"current_user": current_user,
}
base_context.update(global_entities)
return base_context
|
the-stack_106_28255 | import socket
import InfoSource
class SocketPinger(InfoSource.InfoSource):
def __init__(self):
super(SocketPinger, self).__init__()
self.IP = ""
self.Port = 0
self.Name = "Server status"
self.StatusStr = ("UP", "DOWN")
def __call__(self):
        Sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Result = Sock.connect_ex((self.IP, self.Port))
        Sock.close()  # release the socket once the connection attempt has been made
        if Result == 0:
            self.Result = self.StatusStr[0]
        else:
            self.Result = self.StatusStr[1]
        return self.Result
@classmethod
def fromDict(cls, src_dict):
Pinger = cls()
Pinger.Name = src_dict["Name"]
Pinger.IP = src_dict["IP"]
Pinger.Port = src_dict["Port"]
if "StatusStr" in src_dict:
Pinger.StatusStr = src_dict["StatusStr"]
return Pinger
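# Minimal usage sketch (hypothetical host/port):
#   pinger = SocketPinger.fromDict({"Name": "Web server", "IP": "127.0.0.1", "Port": 80})
#   print(pinger())  # "UP" if the TCP connect succeeds, otherwise "DOWN"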
|
the-stack_106_28256 | from copy import deepcopy
from types import MethodType
class Prototype(object):
"""
Prototype design pattern abstract class.
- External Usage documentation: U{https://github.com/tylerlaberge/PyPattyrn#prototype-pattern}
- External Prototype Pattern documentation: U{https://en.wikipedia.org/wiki/Prototype_pattern}
"""
def prototype(self, **attributes):
"""
Copy the prototype this object and optionally update attributes.
@param attributes: Keyword arguments of any attributes you wish to update.
@return: A copy of this object with the updated attributes.
"""
obj = deepcopy(self)
for attribute in attributes:
if callable(attributes[attribute]):
setattr(obj, attribute, MethodType(attributes[attribute], obj))
else:
setattr(obj, attribute, attributes[attribute])
return obj
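if __name__ == "__main__":
    # Minimal usage sketch (hypothetical Point subclass; not part of the library API):
    class Point(Prototype):
        def __init__(self, x, y):
            self.x, self.y = x, y

    p1 = Point(1, 2)
    p2 = p1.prototype(x=10)  # deep copy of p1 with x overridden
    print(p2.x, p2.y)  # -> 10 2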
|
the-stack_106_28257 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
s = input()
n = len(s)
if n == 0 or n == 1:
print(n)
else:
    # Sliding window over s: find the length of the shortest substring that
    # contains every distinct character of s.
    freq = defaultdict(int)
    max_char = len(set(s))  # number of distinct characters that must be covered
    min_len = n + 1
    start = 0
    for i in range(n):
        freq[s[i]] += 1
        if len(freq) == max_char:
            # Shrink the window from the left while it still covers all characters.
            while start < i and freq[s[start]] > 1:
                freq[s[start]] -= 1
                start += 1
            curr_len = i - start + 1
            min_len = min(min_len, curr_len)
    print(min_len)
|
the-stack_106_28258 | #!D:\PTU\Gardenia\venv\Scripts\python.exe
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
|
the-stack_106_28259 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
import matplotlib as mpl
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import scipy.spatial.distance
from IPython.core.display import Image, SVG
import skbio.sequence.distance
from skbio import DistanceMatrix, Sequence
from skbio.stats.distance import (
DissimilarityMatrixError, DistanceMatrixError, MissingIDError,
DissimilarityMatrix, randdm)
from skbio.stats.distance._base import (_preprocess_input,
_run_monte_carlo_stats)
from skbio.stats.distance._utils import is_symmetric_and_hollow
from skbio.util import assert_data_frame_almost_equal
from skbio.util._testing import assert_series_almost_equal
class DissimilarityMatrixTestData(TestCase):
def setUp(self):
self.dm_1x1_data = [[0.0]]
self.dm_2x2_data = [[0.0, 0.123], [0.123, 0.0]]
self.dm_2x2_asym_data = [[0.0, 1.0], [-2.0, 0.0]]
self.dm_3x3_data = [[0.0, 0.01, 4.2], [0.01, 0.0, 12.0],
[4.2, 12.0, 0.0]]
self.dm_5x5_data = [[0, 1, 2, 3, 4],
[5, 0, 6, 7, 8],
[9, 1, 0, 2, 3],
[4, 5, 6, 0, 7],
[8, 9, 1, 2, 0]]
class DissimilarityMatrixTests(DissimilarityMatrixTestData):
def setUp(self):
super(DissimilarityMatrixTests, self).setUp()
self.dm_1x1 = DissimilarityMatrix(self.dm_1x1_data, ['a'])
self.dm_2x2 = DissimilarityMatrix(self.dm_2x2_data, ['a', 'b'])
self.dm_2x2_asym = DissimilarityMatrix(self.dm_2x2_asym_data,
['a', 'b'])
self.dm_3x3 = DissimilarityMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
self.dm_5x5 = DissimilarityMatrix(self.dm_5x5_data, list('abcde'))
self.dms = [self.dm_1x1, self.dm_2x2, self.dm_2x2_asym, self.dm_3x3]
self.dm_shapes = [(1, 1), (2, 2), (2, 2), (3, 3)]
self.dm_sizes = [1, 4, 4, 9]
self.dm_transposes = [
self.dm_1x1, self.dm_2x2,
DissimilarityMatrix([[0, -2], [1, 0]], ['a', 'b']), self.dm_3x3]
self.dm_redundant_forms = [np.array(self.dm_1x1_data),
np.array(self.dm_2x2_data),
np.array(self.dm_2x2_asym_data),
np.array(self.dm_3x3_data)]
def test_avoid_copy_on_construction(self):
# ((data, expect_copy))
tests = (([[0, 1], [1, 0]], True),
([(0, 1), (1, 0)], True),
(((0, 1), (1, 0)), True),
(np.array([[0, 1], [1, 0]], dtype='int'), True),
(np.array([[0, 1], [1, 0]], dtype='float'), False),
(np.array([[0, 1], [1, 0]], dtype=np.float32), False),
(np.array([[0, 1], [1, 0]], dtype=np.float64), False),
(np.array([[0, 1], [1, 0]], dtype='double'), False))
for data, expect in tests:
obj = DissimilarityMatrix(data)
self.assertEqual(id(obj.data) != id(data), expect)
def test_within(self):
exp = pd.DataFrame([['a', 'a', 0.0],
['a', 'c', 4.2],
['c', 'a', 4.2],
['c', 'c', 0.0]],
columns=['i', 'j', 'value'])
obs = self.dm_3x3.within(['a', 'c'])
pdt.assert_frame_equal(obs, exp)
def test_within_order_stability(self):
exp = pd.DataFrame([['a', 'a', 0.0],
['a', 'c', 4.2],
['c', 'a', 4.2],
['c', 'c', 0.0]],
columns=['i', 'j', 'value'])
# NOTE: order was changed from ['a', 'c'] to ['c', 'a']
# but the output order in exp is consistent with
# test_within
obs = self.dm_3x3.within(['c', 'a'])
pdt.assert_frame_equal(obs, exp)
obs = self.dm_3x3.within(['a', 'c'])
pdt.assert_frame_equal(obs, exp)
def test_within_missing_id(self):
with self.assertRaisesRegex(MissingIDError, "not found."):
self.dm_3x3.within(['x', 'a'])
def test_between(self):
exp = pd.DataFrame([['b', 'a', 5.],
['b', 'c', 6.],
['b', 'e', 8.],
['d', 'a', 4.],
['d', 'c', 6.],
['d', 'e', 7.]],
columns=['i', 'j', 'value'])
obs = self.dm_5x5.between(['b', 'd'], ['a', 'c', 'e'])
pdt.assert_frame_equal(obs, exp)
def test_between_order_stability(self):
exp = pd.DataFrame([['b', 'a', 5.],
['b', 'c', 6.],
['b', 'e', 8.],
['d', 'a', 4.],
['d', 'c', 6.],
['d', 'e', 7.]],
columns=['i', 'j', 'value'])
# varying the order of the "i" values, result remains consistent
# with the test_between result
obs = self.dm_5x5.between(['d', 'b'], ['a', 'c', 'e'])
pdt.assert_frame_equal(obs, exp)
# varying the order of the "j" values, result remains consistent
# with the test_between result
obs = self.dm_5x5.between(['b', 'd'], ['a', 'e', 'c'])
pdt.assert_frame_equal(obs, exp)
# varying the order of the "i" and "j" values, result remains
# consistent with the test_between result
obs = self.dm_5x5.between(['d', 'b'], ['a', 'e', 'c'])
pdt.assert_frame_equal(obs, exp)
def test_between_overlap(self):
exp = pd.DataFrame([['b', 'a', 5.],
['b', 'd', 7.],
['b', 'e', 8.],
['d', 'a', 4.],
['d', 'd', 0.],
['d', 'e', 7.]],
columns=['i', 'j', 'value'])
# 'd' in i and j overlap
with self.assertRaisesRegex(KeyError, ("This constraint can "
"removed with "
"allow_overlap=True.")):
self.dm_5x5.between(['b', 'd'], ['a', 'd', 'e'])
obs = self.dm_5x5.between(['b', 'd'], ['a', 'd', 'e'],
allow_overlap=True)
pdt.assert_frame_equal(obs, exp)
def test_between_missing_id(self):
with self.assertRaisesRegex(MissingIDError, "not found."):
self.dm_3x3.between(['x', 'a'], ['a', 'b', 'c'])
with self.assertRaisesRegex(MissingIDError, "not found."):
self.dm_3x3.between(['a', 'b'], ['a', 'x', 'c'])
with self.assertRaisesRegex(MissingIDError, "not found."):
self.dm_3x3.between(['a', 'y'], ['a', 'x', 'c'])
def test_stable_order(self):
exp = np.array([1, 3, 4], dtype=int)
obs = self.dm_5x5._stable_order(['d', 'e', 'b'])
npt.assert_equal(obs, exp)
def test_subset_to_dataframe(self):
exp = pd.DataFrame([['b', 'a', 5.],
['b', 'd', 7.],
['b', 'e', 8.],
['d', 'a', 4.],
['d', 'd', 0.],
['d', 'e', 7.]],
columns=['i', 'j', 'value'])
obs = self.dm_5x5._subset_to_dataframe(['b', 'd'], ['a', 'd', 'e'])
pdt.assert_frame_equal(obs, exp)
# and the empty edge cases
exp = pd.DataFrame([],
columns=['i', 'j', 'value'],
index=pd.RangeIndex(start=0, stop=0))
obs = self.dm_5x5._subset_to_dataframe([], ['a', 'd', 'e'])
pdt.assert_frame_equal(obs, exp, check_dtype=False)
obs = self.dm_5x5._subset_to_dataframe(['b', 'd'], [])
pdt.assert_frame_equal(obs, exp, check_dtype=False)
obs = self.dm_5x5._subset_to_dataframe([], [])
pdt.assert_frame_equal(obs, exp, check_dtype=False)
def test_init_from_dm(self):
ids = ['foo', 'bar', 'baz']
# DissimilarityMatrix -> DissimilarityMatrix
exp = DissimilarityMatrix(self.dm_3x3_data, ids)
obs = DissimilarityMatrix(self.dm_3x3, ids)
self.assertEqual(obs, exp)
# Test that copy of data is not made.
self.assertTrue(obs.data is self.dm_3x3.data)
obs.data[0, 1] = 424242
self.assertTrue(np.array_equal(obs.data, self.dm_3x3.data))
# DistanceMatrix -> DissimilarityMatrix
exp = DissimilarityMatrix(self.dm_3x3_data, ids)
obs = DissimilarityMatrix(
DistanceMatrix(self.dm_3x3_data, ('a', 'b', 'c')), ids)
self.assertEqual(obs, exp)
# DissimilarityMatrix -> DistanceMatrix
with self.assertRaises(DistanceMatrixError):
DistanceMatrix(self.dm_2x2_asym, ['foo', 'bar'])
def test_init_non_hollow_dm(self):
data = [[1, 1], [1, 1]]
obs = DissimilarityMatrix(data, ['a', 'b'])
self.assertTrue(np.array_equal(obs.data, data))
data_hollow = skbio.stats.distance._utils.is_hollow(obs.data)
self.assertEqual(data_hollow, False)
def test_init_no_ids(self):
exp = DissimilarityMatrix(self.dm_3x3_data, ('0', '1', '2'))
obs = DissimilarityMatrix(self.dm_3x3_data)
self.assertEqual(obs, exp)
self.assertEqual(obs['1', '2'], 12.0)
def test_init_invalid_input(self):
# Empty data.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix([], [])
# Another type of empty data.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(np.empty((0, 0)), [])
# Invalid number of dimensions.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix([1, 2, 3], ['a'])
# Dimensions don't match.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix([[1, 2, 3]], ['a'])
data = [[0, 1], [1, 0]]
# Duplicate IDs.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(data, ['a', 'a'])
# Number of IDs don't match dimensions.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(data, ['a', 'b', 'c'])
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(data, [])
def test_from_iterable_non_hollow_data(self):
iterable = (x for x in range(4))
exp = DissimilarityMatrix([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
res = DissimilarityMatrix.from_iterable(iterable, lambda a, b: 1)
self.assertEqual(res, exp)
def test_from_iterable_asymmetric_data(self):
iterable = (x for x in range(4))
exp = DissimilarityMatrix([[0, 1, 2, 3],
[-1, 0, 1, 2],
[-2, -1, 0, 1],
[-3, -2, -1, 0]])
res = DissimilarityMatrix.from_iterable(iterable, lambda a, b: b - a)
self.assertEqual(res, exp)
def test_from_iterable_no_key(self):
iterable = (x for x in range(4))
exp = DissimilarityMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]])
res = DissimilarityMatrix.from_iterable(iterable,
lambda a, b: abs(b - a))
self.assertEqual(res, exp)
def test_from_iterable_with_key(self):
iterable = (x for x in range(4))
exp = DissimilarityMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]], ['0', '1', '4', '9'])
res = DissimilarityMatrix.from_iterable(iterable,
lambda a, b: abs(b - a),
key=lambda x: str(x ** 2))
self.assertEqual(res, exp)
def test_from_iterable_empty(self):
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix.from_iterable([], lambda x: x)
def test_from_iterable_single(self):
exp = DissimilarityMatrix([[100]])
res = DissimilarityMatrix.from_iterable(["boo"], lambda a, b: 100)
self.assertEqual(res, exp)
def test_from_iterable_with_keys(self):
iterable = (x for x in range(4))
exp = DissimilarityMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]], ['0', '1', '4', '9'])
res = DissimilarityMatrix.from_iterable(iterable,
lambda a, b: abs(b - a),
keys=iter(['0', '1', '4', '9'])
)
self.assertEqual(res, exp)
def test_from_iterable_with_key_and_keys(self):
iterable = (x for x in range(4))
with self.assertRaises(ValueError):
DissimilarityMatrix.from_iterable(iterable,
lambda a, b: abs(b - a),
key=str,
keys=['1', '2', '3', '4'])
def test_from_iterable_scipy_hamming_metric_with_metadata(self):
# test for #1254
seqs = [
Sequence('ACGT'),
Sequence('ACGA', metadata={'id': 'seq1'}),
Sequence('AAAA', metadata={'id': 'seq2'}),
Sequence('AAAA', positional_metadata={'qual': range(4)})
]
exp = DissimilarityMatrix([
[0, 0.25, 0.75, 0.75],
[0.25, 0.0, 0.5, 0.5],
[0.75, 0.5, 0.0, 0.0],
[0.75, 0.5, 0.0, 0.0]], ['a', 'b', 'c', 'd'])
dm = DissimilarityMatrix.from_iterable(
seqs,
metric=scipy.spatial.distance.hamming,
keys=['a', 'b', 'c', 'd'])
self.assertEqual(dm, exp)
def test_from_iterable_skbio_hamming_metric_with_metadata(self):
# test for #1254
seqs = [
Sequence('ACGT'),
Sequence('ACGA', metadata={'id': 'seq1'}),
Sequence('AAAA', metadata={'id': 'seq2'}),
Sequence('AAAA', positional_metadata={'qual': range(4)})
]
exp = DissimilarityMatrix([
[0, 0.25, 0.75, 0.75],
[0.25, 0.0, 0.5, 0.5],
[0.75, 0.5, 0.0, 0.0],
[0.75, 0.5, 0.0, 0.0]], ['a', 'b', 'c', 'd'])
dm = DissimilarityMatrix.from_iterable(
seqs,
metric=skbio.sequence.distance.hamming,
keys=['a', 'b', 'c', 'd'])
self.assertEqual(dm, exp)
def test_data(self):
for dm, exp in zip(self.dms, self.dm_redundant_forms):
obs = dm.data
self.assertTrue(np.array_equal(obs, exp))
with self.assertRaises(AttributeError):
self.dm_3x3.data = 'foo'
def test_ids(self):
obs = self.dm_3x3.ids
self.assertEqual(obs, ('a', 'b', 'c'))
# Test that we overwrite the existing IDs and that the ID index is
# correctly rebuilt.
new_ids = ['foo', 'bar', 'baz']
self.dm_3x3.ids = new_ids
obs = self.dm_3x3.ids
self.assertEqual(obs, tuple(new_ids))
self.assertTrue(np.array_equal(self.dm_3x3['bar'],
np.array([0.01, 0.0, 12.0])))
with self.assertRaises(MissingIDError):
self.dm_3x3['b']
def test_ids_invalid_input(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3.ids = ['foo', 'bar']
# Make sure that we can still use the dissimilarity matrix after trying
# to be evil.
obs = self.dm_3x3.ids
self.assertEqual(obs, ('a', 'b', 'c'))
def test_dtype(self):
for dm in self.dms:
self.assertEqual(dm.dtype, np.float64)
def test_shape(self):
for dm, shape in zip(self.dms, self.dm_shapes):
self.assertEqual(dm.shape, shape)
def test_size(self):
for dm, size in zip(self.dms, self.dm_sizes):
self.assertEqual(dm.size, size)
def test_transpose(self):
for dm, transpose in zip(self.dms, self.dm_transposes):
self.assertEqual(dm.T, transpose)
self.assertEqual(dm.transpose(), transpose)
# We should get a reference to a different object back, even if the
# transpose is the same as the original.
self.assertTrue(dm.transpose() is not dm)
def test_index(self):
self.assertEqual(self.dm_3x3.index('a'), 0)
self.assertEqual(self.dm_3x3.index('b'), 1)
self.assertEqual(self.dm_3x3.index('c'), 2)
with self.assertRaises(MissingIDError):
self.dm_3x3.index('d')
with self.assertRaises(MissingIDError):
self.dm_3x3.index(1)
def test_redundant_form(self):
for dm, redundant in zip(self.dms, self.dm_redundant_forms):
obs = dm.redundant_form()
self.assertTrue(np.array_equal(obs, redundant))
def test_copy(self):
copy = self.dm_2x2.copy()
self.assertEqual(copy, self.dm_2x2)
self.assertFalse(copy.data is self.dm_2x2.data)
# deepcopy doesn't actually create a copy of the IDs because it is a
# tuple of strings, which is fully immutable.
self.assertTrue(copy.ids is self.dm_2x2.ids)
new_ids = ['hello', 'world']
copy.ids = new_ids
self.assertNotEqual(copy.ids, self.dm_2x2.ids)
copy = self.dm_2x2.copy()
copy.data[0, 1] = 0.0001
self.assertFalse(np.array_equal(copy.data, self.dm_2x2.data))
def test_filter_no_filtering(self):
# Don't actually filter anything -- ensure we get back a different
# object.
obs = self.dm_3x3.filter(['a', 'b', 'c'])
self.assertEqual(obs, self.dm_3x3)
self.assertFalse(obs is self.dm_3x3)
def test_filter_reorder(self):
# Don't filter anything, but reorder the distance matrix.
order = ['c', 'a', 'b']
exp = DissimilarityMatrix(
[[0, 4.2, 12], [4.2, 0, 0.01], [12, 0.01, 0]], order)
obs = self.dm_3x3.filter(order)
self.assertEqual(obs, exp)
def test_filter_single_id(self):
ids = ['b']
exp = DissimilarityMatrix([[0]], ids)
obs = self.dm_2x2_asym.filter(ids)
self.assertEqual(obs, exp)
def test_filter_asymmetric(self):
# 2x2
ids = ['b', 'a']
exp = DissimilarityMatrix([[0, -2], [1, 0]], ids)
obs = self.dm_2x2_asym.filter(ids)
self.assertEqual(obs, exp)
# 3x3
dm = DissimilarityMatrix([[0, 10, 53], [42, 0, 22.5], [53, 1, 0]],
('bro', 'brah', 'breh'))
ids = ['breh', 'brah']
exp = DissimilarityMatrix([[0, 1], [22.5, 0]], ids)
obs = dm.filter(ids)
self.assertEqual(obs, exp)
def test_filter_subset(self):
ids = ('c', 'a')
exp = DissimilarityMatrix([[0, 4.2], [4.2, 0]], ids)
obs = self.dm_3x3.filter(ids)
self.assertEqual(obs, exp)
ids = ('b', 'a')
exp = DissimilarityMatrix([[0, 0.01], [0.01, 0]], ids)
obs = self.dm_3x3.filter(ids)
self.assertEqual(obs, exp)
# 4x4
dm = DissimilarityMatrix([[0, 1, 55, 7], [1, 0, 16, 1],
[55, 16, 0, 23], [7, 1, 23, 0]])
ids = np.asarray(['3', '0', '1'])
exp = DissimilarityMatrix([[0, 7, 1], [7, 0, 1], [1, 1, 0]], ids)
obs = dm.filter(ids)
self.assertEqual(obs, exp)
def test_filter_duplicate_ids(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3.filter(['c', 'a', 'c'])
def test_filter_missing_ids(self):
with self.assertRaises(MissingIDError):
self.dm_3x3.filter(['c', 'bro'])
def test_filter_missing_ids_strict_false(self):
# no exception should be raised
ids = ('c', 'a')
exp = DissimilarityMatrix([[0, 4.2], [4.2, 0]], ids)
obs = self.dm_3x3.filter(['c', 'a', 'not found'], strict=False)
self.assertEqual(obs, exp)
def test_filter_empty_ids(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3.filter([])
def test_plot_default(self):
fig = self.dm_1x1.plot()
self.assertIsInstance(fig, mpl.figure.Figure)
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
ax = axes[0]
self.assertEqual(ax.get_title(), '')
xticks = []
for tick in ax.get_xticklabels():
xticks.append(tick.get_text())
self.assertEqual(xticks, ['a'])
yticks = []
for tick in ax.get_yticklabels():
yticks.append(tick.get_text())
self.assertEqual(yticks, ['a'])
def test_plot_no_default(self):
ids = ['0', 'one', '2', 'three', '4.000']
data = ([0, 1, 2, 3, 4], [1, 0, 1, 2, 3], [2, 1, 0, 1, 2],
[3, 2, 1, 0, 1], [4, 3, 2, 1, 0])
dm = DissimilarityMatrix(data, ids)
fig = dm.plot(cmap='Reds', title='Testplot')
self.assertIsInstance(fig, mpl.figure.Figure)
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
ax = axes[0]
self.assertEqual(ax.get_title(), 'Testplot')
xticks = []
for tick in ax.get_xticklabels():
xticks.append(tick.get_text())
self.assertEqual(xticks, ['0', 'one', '2', 'three', '4.000'])
yticks = []
for tick in ax.get_yticklabels():
yticks.append(tick.get_text())
self.assertEqual(yticks, ['0', 'one', '2', 'three', '4.000'])
def test_repr_png(self):
dm = self.dm_1x1
obs = dm._repr_png_()
self.assertIsInstance(obs, bytes)
self.assertTrue(len(obs) > 0)
def test_repr_svg(self):
obs = self.dm_1x1._repr_svg_()
self.assertIsInstance(obs, str)
self.assertTrue(len(obs) > 0)
def test_png(self):
dm = self.dm_1x1
self.assertIsInstance(dm.png, Image)
def test_svg(self):
dm = self.dm_1x1
self.assertIsInstance(dm.svg, SVG)
def test_to_data_frame_1x1(self):
df = self.dm_1x1.to_data_frame()
exp = pd.DataFrame([[0.0]], index=['a'], columns=['a'])
assert_data_frame_almost_equal(df, exp)
def test_to_data_frame_3x3(self):
df = self.dm_3x3.to_data_frame()
exp = pd.DataFrame([[0.0, 0.01, 4.2],
[0.01, 0.0, 12.0],
[4.2, 12.0, 0.0]],
index=['a', 'b', 'c'], columns=['a', 'b', 'c'])
assert_data_frame_almost_equal(df, exp)
def test_to_data_frame_default_ids(self):
df = DissimilarityMatrix(self.dm_2x2_data).to_data_frame()
exp = pd.DataFrame([[0.0, 0.123],
[0.123, 0.0]],
index=['0', '1'], columns=['0', '1'])
assert_data_frame_almost_equal(df, exp)
def test_str(self):
for dm in self.dms:
obs = str(dm)
# Do some very light testing here to make sure we're getting a
# non-empty string back. We don't want to test the exact
# formatting.
self.assertTrue(obs)
def test_eq(self):
for dm in self.dms:
copy = dm.copy()
self.assertTrue(dm == dm)
self.assertTrue(copy == copy)
self.assertTrue(dm == copy)
self.assertTrue(copy == dm)
self.assertFalse(self.dm_1x1 == self.dm_3x3)
def test_ne(self):
# Wrong class.
self.assertTrue(self.dm_3x3 != 'foo')
# Wrong shape.
self.assertTrue(self.dm_3x3 != self.dm_1x1)
# Wrong IDs.
other = self.dm_3x3.copy()
other.ids = ['foo', 'bar', 'baz']
self.assertTrue(self.dm_3x3 != other)
# Wrong data.
other = self.dm_3x3.copy()
other.data[1, 0] = 42.42
self.assertTrue(self.dm_3x3 != other)
self.assertFalse(self.dm_2x2 != self.dm_2x2)
def test_contains(self):
self.assertTrue('a' in self.dm_3x3)
self.assertTrue('b' in self.dm_3x3)
self.assertTrue('c' in self.dm_3x3)
self.assertFalse('d' in self.dm_3x3)
def test_getslice(self):
# Slice of first dimension only. Test that __getslice__ defers to
# __getitem__.
obs = self.dm_2x2[1:]
self.assertTrue(np.array_equal(obs, np.array([[0.123, 0.0]])))
self.assertEqual(type(obs), np.ndarray)
def test_getitem_by_id(self):
obs = self.dm_1x1['a']
self.assertTrue(np.array_equal(obs, np.array([0.0])))
obs = self.dm_2x2_asym['b']
self.assertTrue(np.array_equal(obs, np.array([-2.0, 0.0])))
obs = self.dm_3x3['c']
self.assertTrue(np.array_equal(obs, np.array([4.2, 12.0, 0.0])))
with self.assertRaises(MissingIDError):
self.dm_2x2['c']
def test_getitem_by_id_pair(self):
# Same object.
self.assertEqual(self.dm_1x1['a', 'a'], 0.0)
# Different objects (symmetric).
self.assertEqual(self.dm_3x3['b', 'c'], 12.0)
self.assertEqual(self.dm_3x3['c', 'b'], 12.0)
# Different objects (asymmetric).
self.assertEqual(self.dm_2x2_asym['a', 'b'], 1.0)
self.assertEqual(self.dm_2x2_asym['b', 'a'], -2.0)
with self.assertRaises(MissingIDError):
self.dm_2x2['a', 'c']
def test_getitem_ndarray_indexing(self):
# Single element access.
obs = self.dm_3x3[0, 1]
self.assertEqual(obs, 0.01)
# Single element access (via two __getitem__ calls).
obs = self.dm_3x3[0][1]
self.assertEqual(obs, 0.01)
# Row access.
obs = self.dm_3x3[1]
self.assertTrue(np.array_equal(obs, np.array([0.01, 0.0, 12.0])))
self.assertEqual(type(obs), np.ndarray)
# Grab all data.
obs = self.dm_3x3[:, :]
self.assertTrue(np.array_equal(obs, self.dm_3x3.data))
self.assertEqual(type(obs), np.ndarray)
with self.assertRaises(IndexError):
self.dm_3x3[:, 3]
def test_validate_invalid_dtype(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate(np.array([[0, 42], [42, 0]]), ['a', 'b'])
def test_validate_invalid_shape(self):
# first check it actually likes good matrices
self.dm_3x3._validate_shape(np.array([[0., 42.], [42., 0.]]))
# it checks just the shape, not the content
self.dm_3x3._validate_shape(np.array([[1., 2.], [3., 4.]]))
# empty array
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate_shape(np.array([]))
# invalid shape
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate_shape(np.array([[0., 42.],
[42., 0.],
[22., 22.]]))
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate_shape(np.array([[[0., 42.], [42., 0.]],
[[0., 24.], [24., 0.]]]))
def test_validate_invalid_ids(self):
# repeated ids
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate_ids(self.dm_3x3.data, ['a', 'a'])
# empty ids
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate_ids(self.dm_3x3.data, [])
# invalid shape
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate_ids(self.dm_3x3.data, ['a', 'b', 'c', 'd'])
class DistanceMatrixTests(DissimilarityMatrixTestData):
def setUp(self):
super(DistanceMatrixTests, self).setUp()
self.dm_1x1 = DistanceMatrix(self.dm_1x1_data, ['a'])
self.dm_2x2 = DistanceMatrix(self.dm_2x2_data, ['a', 'b'])
self.dm_3x3 = DistanceMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
self.dms = [self.dm_1x1, self.dm_2x2, self.dm_3x3]
self.dm_condensed_forms = [np.array([]), np.array([0.123]),
np.array([0.01, 4.2, 12.0])]
def test_init_from_condensed_form(self):
data = [1, 2, 3]
exp = DistanceMatrix([[0, 1, 2],
[1, 0, 3],
[2, 3, 0]], ['0', '1', '2'])
res = DistanceMatrix(data)
self.assertEqual(exp, res)
def test_init_invalid_input(self):
# Asymmetric.
data = [[0.0, 2.0], [1.0, 0.0]]
with self.assertRaises(DistanceMatrixError):
DistanceMatrix(data, ['a', 'b'])
# Non-hollow
data = [[1.0, 2.0], [2.0, 1.0]]
with self.assertRaises(DistanceMatrixError):
DistanceMatrix(data, ['a', 'b'])
# Ensure that the superclass validation is still being performed.
with self.assertRaises(DissimilarityMatrixError):
DistanceMatrix([[1, 2, 3]], ['a'])
def test_init_nans(self):
with self.assertRaisesRegex(DistanceMatrixError, r'NaNs'):
DistanceMatrix([[0.0, np.nan], [np.nan, 0.0]], ['a', 'b'])
def test_from_iterable_no_key(self):
iterable = (x for x in range(4))
exp = DistanceMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]])
res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a))
self.assertEqual(res, exp)
def test_from_iterable_validate_equal_valid_data(self):
validate_true = DistanceMatrix.from_iterable((x for x in range(4)),
lambda a, b: abs(b - a),
validate=True)
validate_false = DistanceMatrix.from_iterable((x for x in range(4)),
lambda a, b: abs(b - a),
validate=False)
self.assertEqual(validate_true, validate_false)
def test_from_iterable_validate_false(self):
iterable = (x for x in range(4))
exp = DistanceMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]])
res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
validate=False)
self.assertEqual(res, exp)
def test_from_iterable_validate_non_hollow(self):
iterable = (x for x in range(4))
with self.assertRaises(DistanceMatrixError):
DistanceMatrix.from_iterable(iterable, lambda a, b: 1)
def test_from_iterable_validate_false_non_symmetric(self):
exp = DistanceMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]])
res = DistanceMatrix.from_iterable((x for x in range(4)),
lambda a, b: a - b,
validate=False)
self.assertEqual(res, exp)
def test_from_iterable_validate_asym(self):
iterable = (x for x in range(4))
with self.assertRaises(DistanceMatrixError):
DistanceMatrix.from_iterable(iterable, lambda a, b: b - a)
def test_from_iterable_with_key(self):
iterable = (x for x in range(4))
exp = DistanceMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]], ['0', '1', '4', '9'])
res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
key=lambda x: str(x**2))
self.assertEqual(res, exp)
def test_from_iterable_empty(self):
with self.assertRaises(DissimilarityMatrixError):
DistanceMatrix.from_iterable([], lambda x: x)
def test_from_iterable_single(self):
exp = DistanceMatrix([[0]])
res = DistanceMatrix.from_iterable(["boo"], lambda a, b: 0)
self.assertEqual(res, exp)
def test_from_iterable_with_keys(self):
iterable = (x for x in range(4))
exp = DistanceMatrix([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1],
[3, 2, 1, 0]], ['0', '1', '4', '9'])
res = DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
keys=iter(['0', '1', '4', '9']))
self.assertEqual(res, exp)
def test_from_iterable_with_key_and_keys(self):
iterable = (x for x in range(4))
with self.assertRaises(ValueError):
DistanceMatrix.from_iterable(iterable, lambda a, b: abs(b - a),
key=str, keys=['1', '2', '3', '4'])
def test_from_iterable_scipy_hamming_metric_with_metadata(self):
# test for #1254
seqs = [
Sequence('ACGT'),
Sequence('ACGA', metadata={'id': 'seq1'}),
Sequence('AAAA', metadata={'id': 'seq2'}),
Sequence('AAAA', positional_metadata={'qual': range(4)})
]
exp = DistanceMatrix([
[0, 0.25, 0.75, 0.75],
[0.25, 0.0, 0.5, 0.5],
[0.75, 0.5, 0.0, 0.0],
[0.75, 0.5, 0.0, 0.0]], ['a', 'b', 'c', 'd'])
dm = DistanceMatrix.from_iterable(
seqs,
metric=scipy.spatial.distance.hamming,
keys=['a', 'b', 'c', 'd'])
self.assertEqual(dm, exp)
def test_from_iterable_skbio_hamming_metric_with_metadata(self):
# test for #1254
seqs = [
Sequence('ACGT'),
Sequence('ACGA', metadata={'id': 'seq1'}),
Sequence('AAAA', metadata={'id': 'seq2'}),
Sequence('AAAA', positional_metadata={'qual': range(4)})
]
exp = DistanceMatrix([
[0, 0.25, 0.75, 0.75],
[0.25, 0.0, 0.5, 0.5],
[0.75, 0.5, 0.0, 0.0],
[0.75, 0.5, 0.0, 0.0]], ['a', 'b', 'c', 'd'])
dm = DistanceMatrix.from_iterable(
seqs,
metric=skbio.sequence.distance.hamming,
keys=['a', 'b', 'c', 'd'])
self.assertEqual(dm, exp)
def test_condensed_form(self):
for dm, condensed in zip(self.dms, self.dm_condensed_forms):
obs = dm.condensed_form()
self.assertTrue(np.array_equal(obs, condensed))
def test_permute_condensed(self):
# Can't really permute a 1x1 or 2x2...
for _ in range(2):
obs = self.dm_1x1.permute(condensed=True)
npt.assert_equal(obs, np.array([]))
for _ in range(2):
obs = self.dm_2x2.permute(condensed=True)
npt.assert_equal(obs, np.array([0.123]))
dm_copy = self.dm_3x3.copy()
np.random.seed(0)
obs = self.dm_3x3.permute(condensed=True)
npt.assert_equal(obs, np.array([12.0, 4.2, 0.01]))
obs = self.dm_3x3.permute(condensed=True)
npt.assert_equal(obs, np.array([4.2, 12.0, 0.01]))
# Ensure dm hasn't changed after calling permute() on it a couple of
# times.
self.assertEqual(self.dm_3x3, dm_copy)
def test_permute_not_condensed(self):
obs = self.dm_1x1.permute()
self.assertEqual(obs, self.dm_1x1)
self.assertFalse(obs is self.dm_1x1)
obs = self.dm_2x2.permute()
self.assertEqual(obs, self.dm_2x2)
self.assertFalse(obs is self.dm_2x2)
np.random.seed(0)
exp = DistanceMatrix([[0, 12, 4.2],
[12, 0, 0.01],
[4.2, 0.01, 0]], self.dm_3x3.ids)
obs = self.dm_3x3.permute()
self.assertEqual(obs, exp)
exp = DistanceMatrix([[0, 4.2, 12],
[4.2, 0, 0.01],
[12, 0.01, 0]], self.dm_3x3.ids)
obs = self.dm_3x3.permute()
self.assertEqual(obs, exp)
def test_eq(self):
# Compare DistanceMatrix to DissimilarityMatrix, where both have the
# same data and IDs.
eq_dm = DissimilarityMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
self.assertTrue(self.dm_3x3 == eq_dm)
self.assertTrue(eq_dm == self.dm_3x3)
def test_to_series_1x1(self):
series = self.dm_1x1.to_series()
exp = pd.Series([], index=[])
assert_series_almost_equal(series, exp)
def test_to_series_2x2(self):
series = self.dm_2x2.to_series()
exp = pd.Series([0.123], index=pd.Index([('a', 'b')]))
assert_series_almost_equal(series, exp)
def test_to_series_4x4(self):
dm = DistanceMatrix([
[0.0, 0.2, 0.3, 0.4],
[0.2, 0.0, 0.5, 0.6],
[0.3, 0.5, 0.0, 0.7],
[0.4, 0.6, 0.7, 0.0]], ['a', 'b', 'c', 'd'])
series = dm.to_series()
exp = pd.Series([0.2, 0.3, 0.4, 0.5, 0.6, 0.7],
index=pd.Index([('a', 'b'), ('a', 'c'), ('a', 'd'),
('b', 'c'), ('b', 'd'), ('c', 'd')]))
assert_series_almost_equal(series, exp)
def test_to_series_default_ids(self):
series = DistanceMatrix(self.dm_2x2_data).to_series()
exp = pd.Series([0.123], index=pd.Index([('0', '1')]))
assert_series_almost_equal(series, exp)
def test_validate_asym_shape(self):
# first check it actually likes good matrices
data_good = np.array([[0., 42.], [42., 0.]])
data_sym, data_hollow = is_symmetric_and_hollow(data_good)
self.assertEqual(data_sym, True)
del data_sym
self.assertEqual(data_hollow, True)
del data_hollow
data_sym = skbio.stats.distance._utils.is_symmetric(data_good)
self.assertEqual(data_sym, True)
del data_sym
data_hollow = skbio.stats.distance._utils.is_hollow(data_good)
self.assertEqual(data_hollow, True)
del data_hollow
self.dm_3x3._validate_shape(data_good)
del data_good
        # _validate_shape checks just the shape, not the content
bad_data = np.array([[1., 2.], [3., 4.]])
data_sym, data_hollow = is_symmetric_and_hollow(bad_data)
self.assertEqual(data_sym, False)
del data_sym
self.assertEqual(data_hollow, False)
del data_hollow
data_sym = skbio.stats.distance._utils.is_symmetric(bad_data)
self.assertEqual(data_sym, False)
del data_sym
data_hollow = skbio.stats.distance._utils.is_hollow(bad_data)
self.assertEqual(data_hollow, False)
del data_hollow
self.dm_3x3._validate_shape(bad_data)
del bad_data
# re-try with partially bad data
bad_data = np.array([[0., 2.], [3., 0.]])
data_sym, data_hollow = is_symmetric_and_hollow(bad_data)
self.assertEqual(data_sym, False)
del data_sym
self.assertEqual(data_hollow, True)
del data_hollow
data_sym = skbio.stats.distance._utils.is_symmetric(bad_data)
self.assertEqual(data_sym, False)
del data_sym
data_hollow = skbio.stats.distance._utils.is_hollow(bad_data)
self.assertEqual(data_hollow, True)
del data_hollow
self.dm_3x3._validate_shape(bad_data)
del bad_data
class RandomDistanceMatrixTests(TestCase):
def test_default_usage(self):
exp = DistanceMatrix(np.asarray([[0.0]]), ['1'])
obs = randdm(1)
self.assertEqual(obs, exp)
obs = randdm(2)
self.assertEqual(obs.shape, (2, 2))
self.assertEqual(obs.ids, ('1', '2'))
obs1 = randdm(5)
num_trials = 10
found_diff = False
for _ in range(num_trials):
obs2 = randdm(5)
if obs1 != obs2:
found_diff = True
break
self.assertTrue(found_diff)
def test_large_matrix_for_symmetry(self):
obs3 = randdm(100)
self.assertEqual(obs3, obs3.T)
def test_ids(self):
ids = ['foo', 'bar', 'baz']
obs = randdm(3, ids=ids)
self.assertEqual(obs.shape, (3, 3))
self.assertEqual(obs.ids, tuple(ids))
def test_constructor(self):
exp = DissimilarityMatrix(np.asarray([[0.0]]), ['1'])
obs = randdm(1, constructor=DissimilarityMatrix)
self.assertEqual(obs, exp)
self.assertEqual(type(obs), DissimilarityMatrix)
def test_random_fn(self):
def myrand(num_rows, num_cols):
# One dm to rule them all...
data = np.empty((num_rows, num_cols))
data.fill(42)
return data
exp = DistanceMatrix(np.asarray([[0, 42, 42], [42, 0, 42],
[42, 42, 0]]), ['1', '2', '3'])
obs = randdm(3, random_fn=myrand)
self.assertEqual(obs, exp)
def test_invalid_input(self):
# Invalid dimensions.
with self.assertRaises(DissimilarityMatrixError):
randdm(0)
# Invalid dimensions.
with self.assertRaises(ValueError):
randdm(-1)
# Invalid number of IDs.
with self.assertRaises(DissimilarityMatrixError):
randdm(2, ids=['foo'])
class CategoricalStatsHelperFunctionTests(TestCase):
def setUp(self):
self.dm = DistanceMatrix([[0.0, 1.0, 2.0],
[1.0, 0.0, 3.0],
[2.0, 3.0, 0.0]], ['a', 'b', 'c'])
self.grouping = [1, 2, 1]
# Ordering of IDs shouldn't matter, nor should extra IDs.
self.df = pd.read_csv(
io.StringIO('ID,Group\nb,Group2\na,Group1\nc,Group1\nd,Group3'),
index_col=0)
self.df_missing_id = pd.read_csv(
io.StringIO('ID,Group\nb,Group2\nc,Group1'), index_col=0)
def test_preprocess_input_with_valid_input(self):
# Should obtain same result using grouping vector or data frame.
exp = (3, 2, np.array([0, 1, 0]),
(np.array([0, 0, 1]), np.array([1, 2, 2])),
np.array([1., 2., 3.]))
obs = _preprocess_input(self.dm, self.grouping, None)
npt.assert_equal(obs, exp)
obs = _preprocess_input(self.dm, self.df, 'Group')
npt.assert_equal(obs, exp)
def test_preprocess_input_raises_error(self):
# Requires a DistanceMatrix.
with self.assertRaises(TypeError):
_preprocess_input(
DissimilarityMatrix([[0, 2], [3, 0]], ['a', 'b']),
[1, 2], None)
# Requires column if DataFrame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.df, None)
# Cannot provide column if not data frame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.grouping, 'Group')
# Column must exist in data frame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.df, 'foo')
# All distance matrix IDs must be in data frame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.df_missing_id, 'Group')
# Grouping vector length must match number of objects in dm.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, [1, 2], None)
# Grouping vector cannot have only unique values.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, [1, 2, 3], None)
# Grouping vector cannot have only a single group.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, [1, 1, 1], None)
def test_run_monte_carlo_stats_with_permutations(self):
obs = _run_monte_carlo_stats(lambda e: 42, self.grouping, 50)
npt.assert_equal(obs, (42, 1.0))
def test_run_monte_carlo_stats_no_permutations(self):
obs = _run_monte_carlo_stats(lambda e: 42, self.grouping, 0)
npt.assert_equal(obs, (42, np.nan))
def test_run_monte_carlo_stats_invalid_permutations(self):
with self.assertRaises(ValueError):
_run_monte_carlo_stats(lambda e: 42, self.grouping, -1)
if __name__ == '__main__':
main()
|
the-stack_106_28262 |
# written by Yang Li for the Leafcutter repo
# forked by mdshw5 and converted to Python3
# https://github.com/mdshw5/leafcutter/blob/master/scripts/leafcutter_cluster_regtools.py
# requires regtools installation
# https://github.com/griffithlab/regtools
# /home/yangili1/tools/regtools/build/regtools junctions extract -a 8 -i 50 -I 500000 bamfile.bam -o outfile.junc
# Using regtools speeds up the junction extraction step by an order of magnitude or more
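# A typical invocation of this script might look like the following (file and
# directory names are illustrative; juncfiles.txt lists one regtools .junc file per line):
#   python leafcutter_cluster_regtools.py -j juncfiles.txt -o testrun -r ./cluster_out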
import sys
import tempfile
import os
import gzip
import shutil
def main(options,libl):
    if options.cluster is None:
pool_junc_reads(libl, options)
refine_clusters(options)
sort_junctions(libl, options)
merge_junctions(options)
get_numers(options)
def pool_junc_reads(flist, options):
outPrefix = options.outprefix
rundir = options.rundir
maxIntronLen = int(options.maxintronlen)
checkchrom = options.checkchrom
outFile = "%s/%s_pooled"%(rundir,outPrefix)
chromLst = ["chr%d"%x for x in range(1,23)]+['chrX','chrY']+["%d"%x for x in range(1,23)]+['X','Y']
by_chrom = {}
for libl in flist:
lib = libl.strip()
if not os.path.isfile(lib):
continue
if options.verbose:
sys.stderr.write("scanning %s...\n"%lib)
for ln in open(lib):
lnsplit=ln.split()
if len(lnsplit)<6:
sys.stderr.write("Error in %s \n" % lib)
continue
chrom, A, B, dot, counts, strand, rA,rb, rgb, blockCount, blockSize, blockStarts = lnsplit
if int(blockCount) > 2:
print(ln)
continue
# regtools -s 0 (unstranded) now puts "?" in strand field when strand is ambiguous
if strand == "?": continue
if checkchrom and (chrom not in chromLst): continue
Aoff, Boff = blockSize.split(",")
A, B = int(A)+int(Aoff), int(B)-int(Boff)+1
if B-A > int(maxIntronLen): continue
try: by_chrom[(chrom,strand)][(A,B)] = int(counts) + by_chrom[(chrom,strand)][(A,B)]
except:
try: by_chrom[(chrom,strand)][(A,B)] = int(counts)
except: by_chrom[(chrom,strand)] = {(A,B):int(counts)}
fout = open(outFile, 'w')
Ncluster = 0
sys.stderr.write("Parsing...\n")
#print("HOW MANY CHROMOSOMES ARE THERE ANYWAY?")
#print(by_chrom)
#print("there are %d elements in by_chrom"%len(by_chrom))
for chrom in by_chrom:
read_ks = [k for k,v in list(by_chrom[chrom].items()) if v >= 3] # a junction must have at least 3 reads
read_ks.sort()
sys.stderr.write("%s:%s.."%chrom)
if len(read_ks) == 0:
continue # weird test case for toy data with only 1 gene - two chroms but one is empty after filtering
#print("LOOK HERE BOYO")
#print(read_ks)
clu = cluster_intervals(read_ks)[0]
for cl in clu:
if len(cl) > 1: # if cluster has more than one intron
buf = '%s:%s '%chrom
for interval, count in [(x, by_chrom[chrom][x]) for x in cl]:
buf += "%d:%d" % interval + ":%d"%count+ " "
fout.write(buf+'\n')
Ncluster += 1
sys.stderr.write("\nWrote %d clusters..\n"%Ncluster)
fout.close()
def sort_junctions(libl, options):
chromLst = ["chr%d"%x for x in range(1,23)]+['chrX','chrY']+["%d"%x for x in range(1,23)]+['X','Y']
outPrefix = options.outprefix
rundir = options.rundir
checkchrom = options.checkchrom
    if options.cluster is None:
refined_cluster = "%s/%s_refined"%(rundir,outPrefix)
else:
refined_cluster = options.cluster
runName = "%s/%s"%(rundir, outPrefix)
exons, cluExons = {}, {}
cluN = 0
for ln in open(refined_cluster):
chrom = ln.split()[0]
cluN += 1
for exon in ln.split()[1:]:
A, B, count = exon.split(":")
if chrom not in exons:
exons[chrom] = {}
exons[chrom][(int(A),int(B))] = cluN
if cluN not in cluExons:
cluExons[cluN] = []
cluExons[cluN].append((chrom, A, B))
merges = {}
for ll in libl:
lib=ll.rstrip()
if not os.path.isfile(lib):
continue
libN = lib
if libN not in merges:
merges[libN] = []
merges[libN].append(lib)
fout_runlibs = open(runName+"_sortedlibs",'w')
for libN in merges:
libName = "%s/%s"%(rundir,libN.split('/')[-1])
by_chrom = {}
foutName = libName+'.%s.sorted.gz'%(runName.split("/")[-1])
fout_runlibs.write(foutName+'\n')
if options.verbose:
sys.stderr.write("Sorting %s..\n"%libN)
if len(merges[libN]) > 1:
if options.verbose:
sys.stderr.write("merging %s...\n"%(" ".join(merges[libN])))
else:
pass
header_string = "chrom %s\n"%libN.split("/")[-1].split(".junc")[0]
fout = gzip.open(foutName, 'wb')
fout.write(header_string.encode('utf-8') )
#fout = gzip.open(foutName,'wb')
#fout.write("chrom %s\n"%libN.split("/")[-1].split(".junc")[0])
for lib in merges[libN]:
for ln in open(lib):
lnsplit=ln.split()
if len(lnsplit)<6:
sys.stderr.write("Error in %s \n" % lib)
continue
chrom, A, B, dot, count, strand, rA,rb, rgb, blockCount, blockSize, blockStarts = lnsplit
if int(blockCount) > 2:
print(ln)
continue
if checkchrom and (chrom not in chromLst): continue
Aoff, Boff = blockSize.split(",")
A, B = int(A)+int(Aoff), int(B)-int(Boff)+1
chrom = (chrom,strand)
if chrom not in by_chrom:
by_chrom[chrom] = {}
intron = (A, B)
if intron in by_chrom[chrom]:
by_chrom[chrom][intron] += int(count)
else:
by_chrom[chrom][intron] = int(count)
for clu in cluExons:
buf = []
ks = cluExons[clu]
ks.sort()
tot = 0
for exon in ks:
chrom, start, end = exon
chrom = tuple(chrom.split(":"))
start, end = int(start), int(end)
if chrom not in by_chrom:
pass
elif (start,end) in by_chrom[chrom]:
tot += by_chrom[chrom][(start,end)]
for exon in ks:
chrom, start, end = exon
start, end = int(start), int(end)
chrom = tuple(chrom.split(":"))
chromID, strand = chrom
if chrom not in by_chrom:
buf.append("%s:%d:%d:clu_%d_%s 0/%d\n"%(chromID,start, end,clu, strand, tot))
elif (start,end) in by_chrom[chrom]:
buf.append("%s:%d:%d:clu_%d_%s %d/%d\n"%(chromID,start, end, clu,strand, by_chrom[chrom][(start,end)], tot))
else:
buf.append("%s:%d:%d:clu_%d_%s 0/%d\n"%(chromID,start, end,clu,strand, tot))
fout.write("".join(buf).encode('utf-8') )
fout.close()
fout_runlibs.close()
def refine_clusters(options):
outPrefix = options.outprefix
rundir = options.rundir
minratio = float(options.mincluratio)
minreads = int(options.minclureads)
inFile = "%s/%s_pooled"%(rundir,outPrefix)
outFile = "%s/%s_refined"%(rundir,outPrefix)
fout = open(outFile,'w')
Ncl = 0
for ln in open(inFile):
clu = []
totN = 0
chrom = ln.split()[0]
for ex in ln.split()[1:]:
A, B, N = ex.split(":")
clu.append(((int(A),int(B)), int(N)))
totN += int(N)
if totN < minreads: continue
#print "CLU",clu
#print "linked",refine_linked(clu)
#print '\n\n'
for cl in refine_linked(clu):
rc = refine_cluster(cl,minratio, minreads)
if len(rc) > 0:
for clu in rc:
buf = '%s ' % chrom
for interval, count in clu:
buf += "%d:%d" % interval + ":%d"%(count)+ " "
Ncl += 1
fout.write(buf+'\n')
sys.stderr.write("Split into %s clusters...\n"%Ncl)
fout.close()
def merge_junctions(options):
''' function to merge junctions '''
outPrefix = options.outprefix
rundir = options.rundir
fnameout = "%s/%s"%(rundir,outPrefix)
flist = "%s/%s_sortedlibs"%(rundir, outPrefix)
lsts = []
for ln in open(flist):
lsts.append(ln.strip())
if options.verbose:
sys.stderr.write("merging %d junction files...\n"%(len(lsts)))
# Change 300 if max open file is < 300
N = min([300, max([100, int(len(lsts)**(0.5))])])
tmpfiles = []
while len(lsts) > 1:
clst = []
for i in range(0,(len(lsts)//N)+1):
lst = lsts[N*i:N*(i+1)]
if len(lst) > 0:
clst.append(lst)
lsts = []
for lst in clst:
if len(lst) == 0: continue
tmpfile = tempfile.mktemp()
os.mkdir(tmpfile)
foutname = tmpfile+"/tmpmerge.gz"
fout = gzip.open(foutname,'w')
merge_files(lst, fout, options)
lsts.append(foutname)
tmpfiles.append(foutname)
fout.close()
shutil.move(lsts[0], fnameout+"_perind.counts.gz")
def merge_files(fnames, fout, options):
fopen = []
for fname in fnames:
if fname[-3:] == ".gz":
fopen.append(gzip.open(fname, "rt")) # rt mode opens gzipped file as text file
else:
fopen.append(open(fname))
finished = False
N = 0
while not finished:
N += 1
if N % 50000 == 0:
sys.stderr.write(".")
buf = []
for f in fopen:
ln = f.readline().split()
if len(ln) == 0:
finished = True
break
chrom = ln[0]
data = ln[1:]
if len(buf) == 0:
buf.append(chrom)
buf += data
if len(buf) > 0:
if buf[0] == "chrom":
if options.verbose:
sys.stderr.write("merging %d files"%(len(buf)-1))
# problematic line - buf is stored as bytes rather than a string
out_string = " ".join(buf)+'\n'
fout.write(out_string.encode('utf-8') )
else:
break
sys.stderr.write(" done.\n")
for fin in fopen:
fin.close()
def cluster_intervals(E):
''' Clusters intervals together. '''
E.sort()
#print(len(E))
current = E[0]
Eclusters, cluster = [], []
i = 0
while i < len(E):
if overlaps(E[i], current):
cluster.append(E[i])
else:
Eclusters.append(cluster)
cluster = [E[i]]
current = (E[i][0], max([current[1], E[i][1]]))
i += 1
if len(cluster) > 0:
Eclusters.append(cluster)
return Eclusters, E
def overlaps(A,B):
'''
    Checks whether A and B overlap.
'''
if A[1] < B[0] or B[1] < A[0]:
return False
else: return True
def refine_linked(clusters):
unassigned = [x for x in clusters[1:]]
current = [clusters[0]]
splicesites = set([current[0][0][0],current[0][0][1]])
newClusters = []
while len(unassigned) > 0:
finished = False
while not finished:
finished = True
torm = []
for intron in unassigned:
inter, count = intron
start, end = inter
if start in splicesites or end in splicesites:
current.append(intron)
splicesites.add(start)
splicesites.add(end)
finished = False
torm.append(intron)
for intron in torm:
unassigned.remove(intron)
newClusters.append(current)
current = []
if len(unassigned) > 0:
current = [unassigned[0]]
splicesites = set([current[0][0][0],current[0][0][1]])
unassigned = unassigned[1:]
return newClusters
def refine_cluster(clu, cutoff, readcutoff):
''' for each exon in the cluster compute the ratio of reads, if smaller than cutoff,
remove and recluster '''
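    # Concretely: an intron is kept only if it carries at least `cutoff`
    # fraction of its cluster's total reads and at least `readcutoff` reads;
    # anything else is dropped and the surviving introns are re-clustered.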
remove = []
dic = {}
intervals = []
reCLU = False
totN = 0
for inter, count in clu:
totN += count
for inter, count in clu:
if (count/float(totN) >= cutoff and count >= readcutoff):
intervals.append(inter)
dic[inter] = count
else:
reCLU = True
if len(intervals) == 0: return []
# This makes sure that after trimming, the clusters are still good
Atmp, B = cluster_intervals(intervals)
A = []
for cl in Atmp:
for c in refine_linked([(x,0) for x in cl]):
if len(c) > 0:
A.append([x[0] for x in c])
if len(A) == 1:
rc = [(x, dic[x]) for x in A[0]]
if len(rc) > 1:
if reCLU:
return refine_cluster([(x, dic[x]) for x in A[0]], cutoff, readcutoff)
else:
return [[(x, dic[x]) for x in A[0]]]
else:
return []
NCs = []
for c in A:
if len(c) > 1:
NC = refine_cluster([(x, dic[x]) for x in c], cutoff, readcutoff)
NCs += NC
return NCs
def get_numers(options):
outPrefix = options.outprefix
rundir = options.rundir
fname = "%s/%s_perind.counts.gz"%(rundir,outPrefix)
fnameout = "%s/%s_perind_numers.counts.gz"%(rundir,outPrefix)
input_file=gzip.open(fname, 'rt') # read in as text
fout = gzip.open(fnameout,'w')
first_line=True
for l in input_file:
if first_line:
header_string = " ".join(l.strip().split(" ")[1:])+'\n'
fout.write(header_string.encode('utf-8')) # print the sample names
first_line=False
else:
l=l.strip()
words=l.split(" ")
words_string = words[0]+ " "+ " ".join( [ g.split("/")[0] for g in words[1:] ] ) +'\n'
fout.write(words_string.encode('utf-8'))
input_file.close()
fout.close()
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--juncfiles", dest="juncfiles",
help="text file with all junction files to be processed")
parser.add_option("-o", "--outprefix", dest="outprefix", default = 'leafcutter',
help="output prefix (default leafcutter)")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-r", "--rundir", dest="rundir", default='./',
help="write to directory (default ./)")
parser.add_option("-l", "--maxintronlen", dest="maxintronlen", default = 100000,
help="maximum intron length in bp (default 100,000bp)")
parser.add_option("-m", "--minclureads", dest="minclureads", default = 30,
help="minimum reads in a cluster (default 30 reads)")
parser.add_option("-p", "--mincluratio", dest="mincluratio", default = 0.001,
help="minimum fraction of reads in a cluster that support a junction (default 0.001)")
parser.add_option("-c", "--cluster", dest="cluster", default = None,
help="refined cluster file when clusters are already made")
parser.add_option("-k", "--checkchrom", dest="checkchrom", default = True,
help="check that the chromosomes are well formated e.g. chr1, chr2, ..., or 1, 2, ...")
(options, args) = parser.parse_args()
if options.juncfiles == None:
sys.stderr.write("Error: no junction file provided...\n")
exit(0)
# Get the junction file list
libl = []
for junc in open(options.juncfiles):
junc = junc.strip()
try:
open(junc)
except:
sys.stderr.write("%s does not exist... check your junction files.\n"%junc)
exit(0)
libl.append(junc)
main(options, libl)
|
the-stack_106_28264 | # flake8: noqa
import numpy
import numpy as np
from skimage.data import camera
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
from aydin.io.datasets import (
normalise,
add_noise,
dots,
lizard,
pollen,
newyork,
characters,
)
from aydin.it.classic_denoisers.dictionary_fixed import (
calibrate_denoise_dictionary_fixed,
)
from aydin.util.log.log import Log
def demo_dictionary_fixed(image, display=True):
"""
Demo for self-supervised denoising using camera image with synthetic noise
"""
Log.enable_output = True
Log.set_log_max_depth(5)
image = normalise(image.astype(np.float32))
noisy = add_noise(image)
function, parameters, memreq = calibrate_denoise_dictionary_fixed(
noisy, display_dictionary=False
)
denoised = function(noisy, **parameters)
image = numpy.clip(image, 0, 1)
noisy = numpy.clip(noisy, 0, 1)
denoised = numpy.clip(denoised, 0, 1)
psnr_noisy = psnr(image, noisy)
ssim_noisy = ssim(image, noisy)
psnr_denoised = psnr(image, denoised)
ssim_denoised = ssim(image, denoised)
print(" noisy :", psnr_noisy, ssim_noisy)
print("dictionary denoised:", psnr_denoised, ssim_denoised)
if display:
import napari
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(image, name='image')
viewer.add_image(noisy, name='noisy')
viewer.add_image(denoised, name='denoised')
return ssim_denoised
if __name__ == "__main__":
newyork_image = newyork()
demo_dictionary_fixed(newyork_image)
characters_image = characters()
demo_dictionary_fixed(characters_image)
pollen_image = pollen()
demo_dictionary_fixed(pollen_image)
lizard_image = lizard()
demo_dictionary_fixed(lizard_image)
dots_image = dots()
demo_dictionary_fixed(dots_image)
camera_image = camera()
demo_dictionary_fixed(camera_image)
|
the-stack_106_28266 | #!/usr/bin/env python3
import rospy
import json
from lg_msg_defs.srv import USCSMessage
from lg_msg_defs.srv import DesiredState
from interactivespaces_msgs.msg import GenericMessage
from std_msgs.msg import String
from appctl_msg_defs.msg import Mode
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'state_setter'
class StateSetter(object):
def __init__(self, state_pub, display_url_pub, kiosk_url_pub, runway_pub, last_uscs_service):
self.state_pub = state_pub
self.display_url_pub = display_url_pub
self.kiosk_url_pub = kiosk_url_pub
self.runway_pub = runway_pub
self.last_uscs_service = last_uscs_service
self.state_display = None
self.state_kiosk = None
self.state = None
def get_current_state(self):
state = self.last_uscs_service().message
try:
return json.loads(state)
except Exception:
rospy.logerr("Last state from /uscs/message service returned non-json parsable (%s)" % state)
return {}
def handle_state_setting(self, msg):
self.state = None
try:
state = json.loads(msg.data)
except Exception:
rospy.logerr('Error with the state message, non json format:\n%s' % msg.data)
return
self.state = state
# if the current state is tactile and the new state is tactile, then
# we follow a special path, also if just the new state is tactile we
# follow another but different special path
#if self.handle_current_and_new_tactile(state):
# self._clear_state()
# return
#if self.handle_new_state_tactile(state):
# return
self.publish_uscs(state)
self._clear_state()
def _clear_state(self):
# set state to none since we don't need to store it when
# we change the url / handle tactile ourself
self.state = self.state_display = self.state_kiosk = None
def handle_current_and_new_tactile(self, new_state):
# if the current and new state are tactile, only urls / runway
# cards need to be changed / emitted
current_state = self.get_current_state()
if not self.is_tactile(current_state) or not self.is_tactile(new_state):
return False
        if self.valid_runway_card(new_state.get('runway_card', None)):
            self.runway_pub.publish(new_state['runway_card'])
        else:
            # if the runway card isn't valid we want to just set the urls
            self.publish_urls(new_state['kiosk_url'], new_state['display_url'])
        return True
def handle_new_state_tactile(self, new_state):
self.publish_uscs(new_state)
# store state so when the kiosk and display are finished
# loading they can query their own runway cards
self.state = self.state_kiosk = self.state_display = new_state
def publish_uscs(self, state):
self.state_pub.publish(self.make_director(state))
def desired_state(self, req):
state = self.state
if state is None:
return ''
if req.node == '42-a' or req.node == 'display':
if self.state_display is None:
return ''
self.state_display = None
return json.dumps(state)
if req.node == '42-b' or req.node == 'kiosk':
if self.state_kiosk is None:
return ''
self.state_kiosk = None
return json.dumps(state)
return ''
def make_director(self, uscs_message):
# makes a generic message and returns it
ret = GenericMessage()
ret.type = 'json'
try:
ret.message = json.dumps(uscs_message)
except Exception:
rospy.logerr('Could not dump state message into json...')
ret.message = ''
return ret
def valid_runway_card(self, runway_card):
# runway cards can sometimes be "None" as a string
        if runway_card is None or runway_card == 'None':
return False
if runway_card[11] == '3':
return False
return True
def handle_tactile(self, new_state):
if new_state.get('runway_card', 'None') != 'None' and \
new_state.get('runway_card') is not None and \
new_state.get('runway_card')[11] != '3':
self.runway_pub.publish(new_state['runway_card'])
return
self.publish_urls(new_state['kiosk_url'], new_state['display_url'])
def publish_urls(self, kiosk_url, display_url):
self.kiosk_url_pub.publish(kiosk_url)
self.display_url_pub.publish(display_url)
def grab_urls(self, state):
# grabs urls which are the only asset when the
# activity is "browser" If there are more assets
# then we ignore the window
urls = []
for window in state.get('windows', []):
if window.get('activity') != 'browser':
continue
if len(window.get('assets', [])) == 1:
urls.append(window['assets'][0])
return urls
def _is_tactile_url(self, urls):
# checking that the length of the filter is not zero, if it is then no urls
# matched those that should be tactile
return len([url for url in urls if 'maps.google.com' in url or 'google.com/maps' in url]) != 0
def is_tactile(self, state):
        return self._is_tactile_url(self.grab_urls(state))
def main():
rospy.init_node(NODE_NAME)
state_pub = rospy.Publisher('/director/scene', GenericMessage, queue_size=10)
runway_pub = rospy.Publisher('/portal_kiosk/runway_change', String, queue_size=10)
display_pub = rospy.Publisher('/display/switch', String, queue_size=10)
kiosk_pub = rospy.Publisher('/kiosk/switch', String, queue_size=10)
last_uscs_service = rospy.ServiceProxy('/uscs/message', USCSMessage, persistent=False)
state_setter = StateSetter(state_pub, display_pub, kiosk_pub, runway_pub, last_uscs_service)
rospy.Service('/state_setter/desired_state', DesiredState, state_setter.desired_state)
rospy.Subscriber('/state_setter/set_state', String, state_setter.handle_state_setting)
rospy.spin()
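# Example of driving this node from the command line (the payload below is
# illustrative only; the data field carries a JSON-encoded USCS state):
#   rostopic pub -1 /state_setter/set_state std_msgs/String "data: '{\"windows\": []}'"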
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
|
the-stack_106_28267 | import sys
import h5py
import numpy as np
from pydata.increment import __next_index__
if 'pyslave' in sys.modules :
from pyslave import __slave_disp__ as disp
else:
disp = print
class createh5(h5py.File):
"""Create a new H5 file to save data.
    Use the append method to add data to the file."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__data_counter__ = dict()
self.fname = args[0]
def __next_dataset__(self, dataset, ndigits):
if not dataset in self.__data_counter__ :
counter = __next_index__(dataset,'',self.keys())
else:
counter = self.__data_counter__[dataset] + 1
return counter, dataset + str(counter).zfill(ndigits)
    def append(self, data, dataset='data', ndigits=3, attrs=None, **kwargs):
        """Create a new dataset with automatic increment of the name and save data to it.
        N.B. : data is an instance of the pyslave.datadict.Data class.
        Extra attributes can be passed through the attrs dictionary."""
        counter, dataset_name = self.__next_dataset__(dataset, ndigits)
        ds = super().create_dataset(dataset_name, data=data.__data__, **kwargs)
        attributes = data.__attributes__.copy()
        if attrs:
            attributes.update(attrs)
self.__data_counter__[dataset] = counter
for k,v in attributes.items() :
ds.attrs[k] = v
self.flush()
msg = 'Data saved to {0} in dataset {1}.'.format(self.fname, dataset_name)
disp(msg)
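# Minimal usage sketch (assumes a pyslave-style data object exposing __data__
# and __attributes__; file and dataset names below are illustrative only):
#   f = createh5('measurement.h5', 'w')
#   f.append(data, dataset='trace', attrs={'T': 4.2})
#   f.close()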
class loadh5:
"""Load all datasets of a H5 file into numpy arrays.
Example :
d = loadh5('Sij_vs_Temperature.h5')
print(d)
Loaded from Sij_vs_Temperature.h5 with 70 datasets
Data fields : freq,Sij
Attributes : T
plot(T, abs(Sij).max(1))
"""
def __init__(self, filename,print_file = True):
with h5py.File(filename,'r') as f:
keys = list(f.keys())
length = len(keys)
dataset = f[keys[0]]
attr_keys = list(dataset.attrs.keys())
data_keys = dataset.dtype.names
# Build attribute array
all_attrs = { k:np.empty(length, dtype=type(dataset.attrs[k]) ) for k in attr_keys}
all_data = { k:np.empty((length, len(dataset[k])), dtype=dataset[k].dtype) for k in data_keys}
for i,d in enumerate(f.values()):
for k in attr_keys : all_attrs[k][i] = d.attrs[k]
for k in data_keys : all_data[k][i] = d[k]
for k in attr_keys:
setattr(self, k, all_attrs[k])
for k in data_keys:
setattr(self, k, all_data[k])
self.attr_keys = attr_keys
self.data_keys = data_keys
self.length = length
self.filename = filename
if print_file:
print(self)
def __repr__(self):
s = "Loaded from {0} with {1} datasets\n".format(self.filename, self.length)
s += "Data fields : " + ', '.join(self.data_keys) + '\n'
s += "Attributes : " + ', '.join(self.attr_keys)
return s
|
the-stack_106_28268 | #!/usr/bin/env python
# -*- noplot -*-
import time
from pylab import *
def get_memory():
"Simulate a function that returns system memory"
return 100*(0.5 + 0.5*sin(0.5*pi*time.time()))
def get_cpu():
"Simulate a function that returns cpu usage"
return 100*(0.5 + 0.5*sin(0.2*pi*(time.time() - 0.25)))
def get_net():
"Simulate a function that returns network bandwidth"
return 100*(0.5 + 0.5*sin(0.7*pi*(time.time() - 0.1)))
def get_stats():
return get_memory(), get_cpu(), get_net()
# turn interactive mode on for dynamic updates. If you aren't in
# interactive mode, you'll need to use a GUI event handler/timer.
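# (A non-interactive alternative, sketched: wrap the update logic in a function
#  and drive it with matplotlib.animation.FuncAnimation instead of ion()/draw().)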
ion()
fig, ax = plt.subplots()
ind = arange(1, 4)
pm, pc, pn = bar(ind, get_stats())
centers = ind + 0.5*pm.get_width()
pm.set_facecolor('r')
pc.set_facecolor('g')
pn.set_facecolor('b')
ax.set_xlim([0.5, 4])
ax.set_xticks(centers)
ax.set_ylim([0, 100])
ax.set_xticklabels(['Memory', 'CPU', 'Bandwidth'])
ax.set_ylabel('Percent usage')
ax.set_title('System Monitor')
for i in range(200): # run for a little while
m, c, n = get_stats()
pm.set_height(m)
pc.set_height(c)
pn.set_height(n)
ax.set_ylim([0, 100])
draw()
|
the-stack_106_28269 | import numpy as np
import logging
from benchmarker import benchmark
logger = logging.getLogger('expNN_BLAS_level_2_to_level_3')
@benchmark
def naive_loop(A, B, C):
for i in range(C.shape[1]):
C[:, i] = A @ B[:, i]
return C
@benchmark
def recommended_loop(A, B, C):
C = A @ B
return C
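# Note: the naive loop issues one matrix-vector (level-2 BLAS) call per column,
# while `A @ B` dispatches a single matrix-matrix (level-3 BLAS) GEMM, which is
# typically much faster thanks to cache blocking and better data reuse.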
def expNN_BLAS_level_2_to_level_3(b, n):
A = np.random.randn(n, n)
B = np.random.randn(n, n)
C = np.random.randn(n, n)
res1 = b.benchmark("loop_translation_nai", naive_loop, A, B, C)
res2 = b.benchmark("loop_translation_rec", recommended_loop, A, B, C)
logger.info('LoopTranslation correctness: {}'.format(np.allclose(res1, res2)))
|
the-stack_106_28272 | # %%
import torch
import math
from UnarySim.kernel.tanh import tanhPN
from UnarySim.stream.gen import RNG, SourceGen, BSGen
from UnarySim.metric.metric import ProgError
import matplotlib.pyplot as plt
import time
import math
import numpy as np
# %%
def tanh_fsm_test(bw=8, mode="bipolar", rng="Sobol", depth=4):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
total_cnt = 100
bitwidth = bw
btype = torch.float
rtype=torch.float
stype=torch.float
print("========================================================")
print(mode)
print("========================================================")
# all input values are non-negative
low_bound = 0
if mode == "unipolar":
up_bound = 2**bitwidth
elif mode == "bipolar":
low_bound = 0
up_bound = 2**(bitwidth-1)
input_list = []
for input_val in range(low_bound, up_bound+1, 1):
input_list.append(input_val)
input = torch.tensor(input_list).type(torch.float).div(up_bound).to(device)
output = torch.tanh(input*(2**(depth-1)))
result_pe_total = []
for rand_idx in range(1, total_cnt+1):
outputPE = ProgError(output, mode=mode).to(device)
inputPE = ProgError(input, mode=mode).to(device)
inputSRC = SourceGen(input, bitwidth, mode=mode, rtype=rtype)().to(device)
inputRNG = RNG(bitwidth, rand_idx, rng, rtype)().to(device)
inputBS = BSGen(inputSRC, inputRNG, stype).to(device)
dut_tanh_fsm = tanhPN(mode=mode,
depth=depth).to(device)
with torch.no_grad():
start_time = time.time()
for i in range(2**bitwidth):
input_bs = inputBS(torch.tensor([i]))
inputPE.Monitor(input_bs)
output_bs = dut_tanh_fsm(input_bs)
outputPE.Monitor(output_bs)
# get the result for different rng
result_pe = outputPE()[1].cpu().numpy()
result_pe_total.append(result_pe)
# get the result for different rng
result_pe_total = np.array(result_pe_total)
#######################################################################
# check the error of all simulation
#######################################################################
print("RMSE:{:1.4}".format(math.sqrt(np.mean(result_pe_total**2))))
print("MAE: {:1.4}".format(np.mean(np.abs(result_pe_total))))
print("bias:{:1.4}".format(np.mean(result_pe_total)))
print("max: {:1.4}".format(np.max(result_pe_total)))
print("min: {:1.4}".format(np.min(result_pe_total)))
#######################################################################
# check the error according to input value
#######################################################################
max_total = np.max(result_pe_total, axis=0)
min_total = np.min(result_pe_total, axis=0)
avg_total = np.mean(result_pe_total, axis=0)
axis_len = outputPE()[1].size()[0]
input_x_axis = []
for axis_index in range(axis_len):
input_x_axis.append((axis_index/(axis_len-1)*(up_bound-low_bound)+low_bound)/up_bound)
fig, ax = plt.subplots()
ax.fill_between(input_x_axis, max_total, avg_total, facecolor="red", alpha=0.75)
ax.fill_between(input_x_axis, avg_total, min_total, facecolor="blue", alpha=0.75)
ax.plot(input_x_axis, avg_total, label='Avg error', color="black", linewidth=0.3)
plt.tight_layout()
plt.xlabel('Input value')
plt.ylabel('Output error')
plt.xticks(np.arange(0, 1.1, step=0.5))
# ax.xaxis.set_ticklabels([])
plt.xlim(0, 1)
plt.yticks(np.arange(-1.1, 1.1, step=0.2))
# ax.yaxis.set_ticklabels([])
plt.ylim(-1, 1)
plt.grid(b=True, which="both", axis="y", linestyle="--", color="grey", linewidth=0.3)
fig.set_size_inches(4, 4)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.show()
plt.close()
# %%
tanh_fsm_test(8, "bipolar", "Sobol", depth=2)
# %%
tanh_fsm_test(8, "bipolar", "SYS", depth=2)
# %%
tanh_fsm_test(8, "bipolar", "LFSR", depth=3) |
the-stack_106_28273 | import os
import sys
import psutil
from monk.gluon_prototype import prototype
from monk.compare_prototype import compare
from monk.pip_unit_tests.gluon.common import print_start
from monk.pip_unit_tests.gluon.common import print_status
import mxnet as mx
import numpy as np
from monk.gluon.losses.return_loss import load_loss
def test_loss_l1(system_dict):
forward = True;
test = "test_loss_l1";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
label = np.random.rand(1, 5);
label = mx.nd.array(label);
y = np.random.rand(1, 5);
y = mx.nd.array(y);
gtf.loss_l1();
load_loss(gtf.system_dict);
loss_obj = gtf.system_dict["local"]["criterion"];
loss_val = loss_obj(y, label);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
|
the-stack_106_28274 | import cgi
import json
import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.http import HttpResponse, \
HttpResponseForbidden, HttpResponseNotFound, HttpResponseServerError
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from tardis.tardis_portal.models import \
ExperimentParameterSet
from tardis.tardis_portal.ParameterSetManager import ParameterSetManager
def render_response_index(request, *args, **kwargs):
return render(request, *args, **kwargs)
def render_response_search(request, url, c):
from tardis.search.views import getNewSearchDatafileSelectionForm
links = {}
for app in settings.INSTALLED_APPS:
if app.startswith('tardis.apps.'):
view = '%s.views.search' % app
try:
links[app.split('.')[2]] = reverse(view)
except:
pass
c['searchDatafileSelectionForm'] = \
getNewSearchDatafileSelectionForm(request.GET.get('type', None))
c['links'] = links
return render(request, url, c)
def render_error_message(request, message, status=400):
"""
Render a simple text error message in a generic error page.
Any newlines are turned into <br>.
"""
formatted = cgi.escape(message).replace('\n', '<br/>')
return render(request, 'tardis_portal/user_error.html',
{'error_message': formatted}, status=status)
def return_response_not_found(request):
return HttpResponseNotFound(render_response_index(request, '404.html', {}))
def return_response_error_message(request, redirect_path, context):
return HttpResponseServerError(render_response_index(request,
redirect_path, context))
def return_response_error(request):
return HttpResponseForbidden(render_response_index(request, '403.html', {}))
def get_experiment_referer(request, dataset_id):
from tardis.tardis_portal.auth.decorators import get_accessible_experiments_for_dataset
try:
from_url = request.META['HTTP_REFERER']
        from_url_split = re.sub(r'^https?://', '', from_url).split('/')
domain_url_split = Site.objects.get_current().domain.split('//')
referer = 0
if from_url_split[0] != domain_url_split[1]:
return None
if from_url_split[1] == 'experiment' and from_url_split[2] == 'view':
referer = int(from_url_split[3])
else:
return None
for experiment in get_accessible_experiments_for_dataset(request, dataset_id):
if experiment.id == referer:
return experiment
except:
pass
return None
def render_to_file(template, filename, context):
string_for_output = render_to_string(template, context)
# The render_to_string method returns a unicode string, which will cause
# an error when written to file if the string contain diacritics. We
# need to do a utf-8 encoding before writing to file
# see http://packages.python.org/kitchen/unicode-frustrations.html
open(filename, "w").write(string_for_output.encode('utf8', 'replace'))
class RestfulExperimentParameterSet(object):
'''
Helper class which enables a Backbone.sync-compatible interface to be
created for a ExperimentParameterSet just by specifying a function which
provides the schema and a form.
(A function for the schema is required rather than the actual schema, as
to run unit tests effectively the object needs to be able to create the
schema after instantiation.)
For UI consistency, it's best to make sure the schema has hidden == true.
'''
def __init__(self, schema_func, form_cls):
'''
Takes a schema URI and a Form class.
'''
self.schema_func = schema_func
self.form_cls = form_cls
self.parameter_names = form_cls().fields.keys()
def _get_schema(self):
''' Use schema function to get the schema. '''
return self.schema_func()
schema = property(_get_schema)
def __str__(self):
return "%s for %s into %s" % \
(self.__class__, self.form_cls, self.schema.namespace)
def _get_dict_from_ps(self, ps):
'''
Build dictionary by getting the parameter values from the keys, then
zipping it all together.
'''
psm = ParameterSetManager(ps)
return dict([('id', ps.id)]+ # Use set ID
zip(self.parameter_names,
(psm.get_param(k, True) for k in self.parameter_names)))
def _get_view_functions(self):
context = self
# Collection resource
def list_or_create(request, *args, **kwargs):
if request.method == 'POST':
return context._create(request, *args, **kwargs)
return context._list(request, *args, **kwargs)
# Item resource
def get_or_update_or_delete(request, *args, **kwargs):
if request.method == 'PUT':
return context._update(request, *args, **kwargs)
elif request.method == 'DELETE':
return context._delete(request, *args, **kwargs)
return context._get(request, *args, **kwargs)
return {'list_or_create': list_or_create,
'get_or_update_or_delete': get_or_update_or_delete }
view_functions = property(_get_view_functions)
def _list(self, request, experiment_id):
from tardis.tardis_portal.auth.decorators import has_experiment_access
if not has_experiment_access(request, experiment_id):
return return_response_error(request)
sets = ExperimentParameterSet.objects.filter(schema=self.schema,
experiment__pk=experiment_id)
return HttpResponse(json.dumps([self._get_dict_from_ps(ps)
for ps in sets]),
content_type='application/json; charset=utf-8')
def _get(self, request, experiment_id, ps_id):
from tardis.tardis_portal.auth.decorators import has_experiment_access
if not has_experiment_access(request, experiment_id):
return return_response_error(request)
try:
ps = ExperimentParameterSet.objects.get(schema=self.schema,
experiment__pk=experiment_id,
id=ps_id)
return HttpResponse(json.dumps(self._get_dict_from_ps(ps)),
content_type='application/json; charset=utf-8')
except:
return return_response_not_found(request)
def _create(self, request, experiment_id):
from tardis.tardis_portal.auth.decorators import has_experiment_write
if not has_experiment_write(request, experiment_id):
return return_response_error(request)
form = self.form_cls(json.loads(request.body))
if not form.is_valid():
return HttpResponse('', status=400)
ps = ExperimentParameterSet(experiment_id=experiment_id,
schema=self.schema)
ps.save()
ParameterSetManager(ps).set_params_from_dict(form.cleaned_data)
return HttpResponse(json.dumps(self._get_dict_from_ps(ps)),
content_type='application/json; charset=utf-8',
status=201)
def _update(self, request, experiment_id, ps_id):
from tardis.tardis_portal.auth.decorators import has_experiment_write
if not has_experiment_write(request, experiment_id):
return return_response_error(request)
form = self.form_cls(json.loads(request.body))
if not form.is_valid():
return HttpResponse('', status=400)
try:
ps = ExperimentParameterSet.objects.get(experiment_id=experiment_id,
id=ps_id)
except ExperimentParameterSet.DoesNotExist:
return HttpResponse('', status=404)
ParameterSetManager(ps).set_params_from_dict(form.cleaned_data)
return HttpResponse(json.dumps(self._get_dict_from_ps(ps)),
content_type='application/json; charset=utf-8',
status=201)
def _delete(self, request, experiment_id, ps_id):
from tardis.tardis_portal.auth.decorators import has_experiment_write
if not has_experiment_write(request, experiment_id):
return return_response_error(request)
try:
ps = ExperimentParameterSet.objects.get(experiment_id=experiment_id,
id=ps_id)
except ExperimentParameterSet.DoesNotExist:
return HttpResponse('', status=404)
print (ps.schema_id, self.schema.id, str(self))
obj = self._get_dict_from_ps(ps)
ps.delete()
return HttpResponse(json.dumps(obj),
content_type='application/json; charset=utf-8')
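# Hypothetical wiring sketch (the schema function and form class below are
# placeholders, not defined in this module):
#   related_info = RestfulExperimentParameterSet(get_related_info_schema,
#                                                RelatedInfoForm)
#   list_view = related_info.view_functions['list_or_create']
#   item_view = related_info.view_functions['get_or_update_or_delete']
#   # map list_view/item_view to the collection and item URL patterns.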
|
the-stack_106_28275 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import pkg_resources
from jinja2.environment import Template
import cloudify_cli
from .. import env
from .. import local
from ..cli import cfy
from .. import blueprint
from .. import exceptions
from ..config import config
from ..logger import DEFAULT_LOG_FILE
from ..logger import configure_loggers
from ..exceptions import CloudifyCliError
@cfy.command(name='init', short_help='Initialize a working env')
@cfy.argument('blueprint-path', required=False)
@cfy.options.blueprint_filename()
@cfy.options.blueprint_id(required=False, validate=True)
@cfy.options.reset_context
@cfy.options.inputs
@cfy.options.install_plugins
@cfy.options.init_hard_reset
@cfy.options.enable_colors
@cfy.options.common_options
@cfy.pass_logger
def init(blueprint_path,
blueprint_filename,
blueprint_id,
reset_context,
inputs,
install_plugins,
hard,
enable_colors,
logger):
"""Initialize a Cloudify environment.
This is required to perform many actions and should be the first
action performed after installing Cloudify.
Note: Running `cfy install` or `cfy profiles use` will
initialize an environment automatically.
Providing a `BLUEPRINT_PATH` will also initialize a blueprint to
work on.
After initialization, the CLI's configuration can be found under
~/.cloudify/config.yaml. For more information refer to the docs
at http://docs.getcloudify.org
"""
profile_name = 'local'
if blueprint_path:
if reset_context or hard:
logger.warning(
'The `--reset-context` and `--hard` flags are ignored '
'when initializing a blueprint')
init_local_profile(
reset_context=True,
hard=False,
enable_colors=enable_colors
)
env.set_active_profile(profile_name)
processed_blueprint_path = blueprint.get(
blueprint_path,
blueprint_filename
)
blueprint_id = blueprint_id or blueprint.generate_id(
processed_blueprint_path,
blueprint_filename
)
if os.path.isdir(local.storage_dir(blueprint_id)):
shutil.rmtree(local.storage_dir(blueprint_id))
try:
storage = local.get_storage()
local.initialize_blueprint(
blueprint_path=processed_blueprint_path,
name=blueprint_id or 'local',
inputs=inputs,
storage=storage,
install_plugins=install_plugins,
resolver=config.get_import_resolver()
)
except ImportError as e:
e.possible_solutions = [
"Run `cfy init {0} --install-plugins`".format(blueprint_path),
"Run `cfy install-plugins {0}`".format(blueprint_path)
]
raise
logger.info("Initialized {0}\nIf you make changes to the "
"blueprint, run `cfy init {0}` "
"again to apply them".format(blueprint_path))
else:
if env.is_initialized() and not (reset_context or hard):
raise CloudifyCliError(
'Environment is already initialized. '
'You can reset the environment by running `cfy init -r`')
init_local_profile(reset_context, hard, enable_colors)
env.set_active_profile(profile_name)
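# Example invocations (the blueprint path is illustrative; see `cfy init --help`
# for the full option list):
#   cfy init                              # initialize a local profile
#   cfy init -r                           # reset an already-initialized environment
#   cfy init my-blueprint/blueprint.yaml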
@cfy.pass_logger
def init_local_profile(reset_context=False,
hard=False,
enable_colors=False,
logger=None):
logger.info('Initializing local profile ...')
if reset_context:
if hard:
os.remove(config.CLOUDIFY_CONFIG_PATH)
# else:
# TODO: Is this check necessary?
# _raise_initialized_error('local')
_create_profiles_dir_and_config(hard, enable_colors)
logger.info('Initialization completed successfully')
@cfy.pass_logger
def init_manager_profile(profile_name,
reset_context=False,
hard=False,
enable_colors=False,
logger=None):
logger.info('Initializing profile {0}...'.format(profile_name))
context_file_path = env.get_context_path(profile_name)
if context_file_path and os.path.isfile(context_file_path):
if reset_context:
if hard:
os.remove(config.CLOUDIFY_CONFIG_PATH)
else:
os.remove(context_file_path)
else:
_raise_initialized_error(profile_name)
_create_profiles_dir_and_config(hard, enable_colors)
profile = env.ProfileContext()
profile.manager_ip = profile_name
profile.save()
logger.info('Initialization completed successfully')
def _create_profiles_dir_and_config(hard, enable_colors):
if not os.path.isdir(env.PROFILES_DIR):
os.makedirs(env.PROFILES_DIR, mode=0o700)
if not env.config_initialized_with_logging() or hard:
set_config(enable_colors=enable_colors)
configure_loggers()
def _raise_initialized_error(profile_name):
error = exceptions.CloudifyCliError(
'{0} profile already initialized'.format(profile_name))
error.possible_solutions = [
"Run 'cfy init -r' to force re-initialization "
]
raise error
def set_config(enable_colors=False):
cli_config = pkg_resources.resource_string(
cloudify_cli.__name__,
'config/config_template.yaml').decode('utf-8')
enable_colors = str(enable_colors).lower()
template = Template(cli_config)
rendered = template.render(
log_path=DEFAULT_LOG_FILE,
enable_colors=enable_colors
)
with open(config.CLOUDIFY_CONFIG_PATH, 'a') as f:
f.write(rendered)
f.write(os.linesep)
|
the-stack_106_28276 | # -*- coding: utf-8 -*-
# Save Model Using Pickle
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pickle
url = './data/pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(url, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
test_size = 0.33
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size,
random_state=seed)
# Fit the model on 33%
model = LogisticRegression()
model.fit(X_train, Y_train)
# save the model to disk
filename = 'finalized_model.sav'
pickle.dump(model, open(filename, 'wb'))
# some time later...
# load the model from disk
loaded_model = pickle.load(open(filename, 'rb'))
result = loaded_model.score(X_test, Y_test)
print(result)
|
the-stack_106_28277 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 Michael J. Hayford
""" Top level model classes
.. Created on Wed Mar 14 11:08:28 2018
.. codeauthor: Michael J. Hayford
"""
import os.path
import json_tricks
import rayoptics
import rayoptics.elem.elements as ele
import rayoptics.optical.model_constants as mc
from rayoptics.elem.elements import ElementModel
from rayoptics.elem.parttree import (PartTree, elements_from_sequence)
from rayoptics.parax.paraxialdesign import ParaxialModel
from rayoptics.seq.sequential import SequentialModel
from rayoptics.raytr.opticalspec import OpticalSpecs
from rayoptics.parax.specsheet import create_specsheet_from_model
from rayoptics.optical.model_enums import get_dimension_for_type
class SystemSpec:
""" Container for units and other system level constants
Attributes:
title (str): a short description of the model
initials (str): user initials or other id
temperature (float): model temperature in degrees Celsius
pressure (float): model pressure in mm/Hg
"""
def __init__(self):
self.title = ''
self.initials = ''
self.dimensions = 'mm'
self.temperature = 20.0
self.pressure = 760.0
def __json_decode__(self, **attrs):
for a_key, a_val in attrs.items():
if a_key == 'dimensions':
self._dimensions = (a_val if isinstance(a_val, str)
else get_dimension_for_type(a_val))
else:
setattr(self, a_key, a_val)
def listobj_str(self):
vs = vars(self)
o_str = f"{type(self).__name__}:\n"
for k, v in vs.items():
o_str += f"{k}: {v}\n"
return o_str
@property
def dimensions(self):
""" the model linear units (str). """
return self._dimensions
@dimensions.setter
def dimensions(self, value):
self._dimensions = (value if isinstance(value, str)
else get_dimension_for_type(value))
def nm_to_sys_units(self, nm):
""" convert nm to system units
Args:
nm (float): value in nm
Returns:
float: value converted to system units
"""
if self.dimensions == 'm':
return 1e-9 * nm
elif self.dimensions == 'cm':
return 1e-7 * nm
elif self.dimensions == 'mm':
return 1e-6 * nm
elif self.dimensions == 'in':
return 1e-6 * nm/25.4
elif self.dimensions == 'ft':
return 1e-6 * nm/304.8
else:
return nm
class OpticalModel:
""" Top level container for optical model.
The OpticalModel serves as a top level container of model properties.
Key aspects are built-in element and surface based repesentations of the
optical surfaces.
A sequential optical model is a sequence of surfaces and gaps.
Additionally, it includes optical usage information to specify the
aperture, field of view, spectrum and focus.
Attributes:
ro_version: current version of rayoptics
radius_mode: if True output radius, else output curvature
specsheet: :class:`~rayoptics.parax.specsheet.SpecSheet`
system_spec: :class:`.SystemSpec`
seq_model: :class:`~rayoptics.seq.sequential.SequentialModel`
optical_spec: :class:`~rayoptics.raytr.opticalspec.OpticalSpecs`
parax_model: :class:`~rayoptics.parax.paraxialdesign.ParaxialModel`
ele_model: :class:`~rayoptics.elem.elements.ElementModel`
"""
def __init__(self, radius_mode=False, specsheet=None, **kwargs):
self.ro_version = rayoptics.__version__
self.radius_mode = radius_mode
self.specsheet = specsheet
self.system_spec = SystemSpec()
self.seq_model = SequentialModel(self, **kwargs)
self.optical_spec = OpticalSpecs(self, specsheet=specsheet, **kwargs)
self.parax_model = ParaxialModel(self, **kwargs)
self.ele_model = ElementModel(self, **kwargs)
self.part_tree = PartTree(self, **kwargs)
self.map_submodels()
if self.specsheet:
self.set_from_specsheet()
if kwargs.get('do_init', True):
# need to do this after OpticalSpec is initialized
self.seq_model.update_model()
elements_from_sequence(self.ele_model,
self.seq_model,
self.part_tree)
def map_submodels(self):
"""Setup machinery for model mapping api. """
submodels = {}
submodels['specsheet'] = self.specsheet
submodels['system_spec'] = self.system_spec
submodels['seq_model'] = self.seq_model
submodels['optical_spec'] = self.optical_spec
submodels['parax_model'] = self.parax_model
submodels['ele_model'] = self.ele_model
submodels['part_tree'] = self.part_tree
# Add a level of indirection to allow short and long aliases
submodel_aliases = {
'ss': 'specsheet', 'specsheet': 'specsheet',
'sys': 'system_spec', 'system_spec': 'system_spec',
'sm': 'seq_model', 'seq_model': 'seq_model',
'osp': 'optical_spec', 'optical_spec': 'optical_spec',
'pm': 'parax_model', 'parax_model': 'parax_model',
'em': 'ele_model', 'ele_model': 'ele_model',
'pt': 'part_tree', 'part_tree': 'part_tree',
}
self._submodels = submodels, submodel_aliases
def __getitem__(self, key):
""" Provide mapping interface to submodels. """
submodels, submodel_aliases = self._submodels
return submodels[submodel_aliases[key]]
def name(self):
return self.system_spec.title
def reset(self):
rdm = self.radius_mode
self.__init__()
self.radius_mode = rdm
def __json_encode__(self):
attrs = dict(vars(self))
if hasattr(self, 'app_manager'):
del attrs['app_manager']
del attrs['_submodels']
return attrs
def listobj_str(self):
vs = vars(self)
o_str = f"{type(self).__name__}:\n"
for k, v in vs.items():
o_str += f"{k}: {v}\n"
return o_str
def set_from_specsheet(self, specsheet=None):
if specsheet:
self.specsheet = specsheet
else:
specsheet = self.specsheet
self.optical_spec.set_from_specsheet(specsheet)
self.seq_model.set_from_specsheet(specsheet)
def save_model(self, file_name, version=None):
"""Save the optical_model in a ray-optics JSON file.
Args:
file_name: str or Path
version: optional override for rayoptics version number
"""
file_extension = os.path.splitext(file_name)[1]
filename = file_name if len(file_extension) > 0 else file_name+'.roa'
# update version number prior to writing file.
self.ro_version = rayoptics.__version__ if version is None else version
fs_dict = {}
fs_dict['optical_model'] = self
with open(filename, 'w') as f:
json_tricks.dump(fs_dict, f, indent=1,
separators=(',', ':'), allow_nan=True)
def sync_to_restore(self):
if not hasattr(self, 'ro_version'):
self.ro_version = rayoptics.__version__
self.seq_model.sync_to_restore(self)
self.ele_model.sync_to_restore(self)
self.optical_spec.sync_to_restore(self)
if hasattr(self, 'parax_model'):
self.parax_model.sync_to_restore(self)
else:
self.parax_model = ParaxialModel(self)
if hasattr(self, 'specsheet'):
self.specsheet.sync_to_restore(self)
else:
self.specsheet = None
if hasattr(self, 'part_tree'):
self.part_tree.sync_to_restore(self)
else:
self.part_tree = PartTree(self)
self.part_tree.add_element_model_to_tree(self.ele_model)
self.map_submodels()
self.update_model()
def update_model(self, **kwargs):
self.seq_model.update_model(**kwargs)
self.optical_spec.update_model(**kwargs)
self.parax_model.update_model(**kwargs)
self.ele_model.update_model(**kwargs)
self.part_tree.update_model(**kwargs)
if self.specsheet is None:
self.specsheet = create_specsheet_from_model(self)
self.map_submodels()
def nm_to_sys_units(self, nm):
""" convert nm to system units
Args:
nm (float): value in nm
Returns:
float: value converted to system units
"""
return self.system_spec.nm_to_sys_units(nm)
def add_lens(self, **kwargs):
descriptor = ele.create_lens(**kwargs)
kwargs['insert'] = True
self.insert_ifc_gp_ele(*descriptor, **kwargs)
def add_mirror(self, **kwargs):
descriptor = ele.create_mirror(**kwargs)
kwargs['insert'] = True
self.insert_ifc_gp_ele(*descriptor, **kwargs)
def add_thinlens(self, **kwargs):
descriptor = ele.create_thinlens(**kwargs)
kwargs['insert'] = True
self.insert_ifc_gp_ele(*descriptor, **kwargs)
def add_dummy_plane(self, **kwargs):
descriptor = ele.create_dummy_plane(**kwargs)
kwargs['insert'] = True
self.insert_ifc_gp_ele(*descriptor, **kwargs)
def add_from_file(self, filename, **kwargs):
descriptor = ele.create_from_file(filename, **kwargs)
kwargs['insert'] = True
self.insert_ifc_gp_ele(*descriptor, **kwargs)
def insert_ifc_gp_ele(self, *descriptor, **kwargs):
""" insert interfaces and gaps into seq_model and eles into ele_model
Args:
descriptor: a tuple of additions for the sequential, element and
part tree models
kwargs: keyword arguments including
idx: insertion point in the sequential model
insert: if True, insert the chunk, otherwise replace it
                t: the thickness following a chunk when inserting
"""
sm = self['seq_model']
seq, elm, e_node = descriptor
if 'idx' in kwargs:
sm.cur_surface = kwargs['idx']
idx = sm.cur_surface
e_node.parent = self.part_tree.root_node
# distinguish between adding a new chunk, which requires splitting a
# gap in two, and replacing a node, which uses the existing gaps.
ins_prev_gap = False
if 'insert' in kwargs:
t_after = kwargs['t'] if 't' in kwargs else 0.
if sm.get_num_surfaces() == 2:
# only object space gap, add image space gap following this
gap_label = "Image space"
ins_prev_gap = False
else:
# we have both object and image space gaps; retain the image
# space gap by splitting and inserting the new gap before the
# inserted chunk, unless we're inserting before idx=1.
gap_label = None
if idx > 0:
ins_prev_gap = True
if ins_prev_gap:
t_air, sm.gaps[idx].thi = sm.gaps[idx].thi, t_after
else:
t_air = t_after
g, ag, ag_node = ele.create_air_gap(t=t_air, label=gap_label)
if not ins_prev_gap:
seq[-1][mc.Gap] = g
elm.append(ag)
ag_node.parent = self.part_tree.root_node
else:
# replacing an existing node. need to hook new chunk final
# interface to the existing gap and following (air gap) element
g = sm.gaps[sm.cur_surface+1]
seq[-1][mc.Gap] = g
ag, ag_node = self.part_tree.parent_object(g, '#airgap') # ag.idx = seq[-1][mc.Intfc]
for sg in seq:
if ins_prev_gap:
gap, g = g, sg[mc.Gap]
else:
gap = sg[mc.Gap]
sm.insert(sg[mc.Intfc], gap, prev=ins_prev_gap)
for e in elm:
self.ele_model.add_element(e)
self.ele_model.sequence_elements()
def remove_ifc_gp_ele(self, *descriptor, **kwargs):
""" remove interfaces and gaps from seq_model and eles from ele_model
"""
seq, elm, e_node = descriptor
sg = seq[0]
idx = self.seq_model.ifcs.index(sg[mc.Intfc])
# verify that the sequences match
seq_match = True
for i, sg in enumerate(seq):
if sg[0] is not self.seq_model.ifcs[idx+i]:
seq_match = False
break
if seq_match:
# remove interfaces in reverse
for i in range(idx+len(seq)-1, idx-1, -1):
self.seq_model.remove(i)
for e in elm:
self.ele_model.remove_element(e)
e_node.parent = None
def remove_node(self, e_node):
# remove interfaces from seq_model
self.seq_model.remove_node(e_node)
# remove elements from ele_model
self.ele_model.remove_node(e_node)
# unhook node
e_node.parent = None
def rebuild_from_seq(self):
""" Rebuild ele_model and part_tree from seq_model. """
self['em'].elements = []
self['pt'].root_node.children = []
elements_from_sequence(self['em'], self['sm'], self['pt'])
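# Minimal usage sketch for OpticalModel (illustrative only; the add_lens kwargs
# below are hypothetical and depend on what elem.elements.create_lens accepts):
#
#     opm = OpticalModel()
#     opm['sm'].gaps[0].thi = 1e10       # object at infinity
#     opm.add_lens(idx=0, t=5.0)         # hypothetical kwargs
#     opm.update_model()
#     opm.save_model('example_model')    # writes example_model.roa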
|
the-stack_106_28278 | #!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive)
Options:
-h --help Show this screen.
"""
import os
import time
from docopt import docopt
import donkeycar as dk
#import parts
from donkeycar.parts.controller import LocalWebController, \
JoystickController, WebFpv
from donkeycar.parts.throttle_filter import ThrottleFilter
from donkeycar.utils import *
from socket import gethostname
def drive(cfg ):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
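    # Sketch of the part interface assumed by V.add() below (illustrative only):
    # a part exposes run() (or run_threaded() when added with threaded=True),
    # consumes its named inputs and returns its named outputs once per loop.
    #
    #     class ConstantThrottle:
    #         def run(self, image):
    #             return 0.0, 0.3   # angle, throttle
    #
    #     V.add(ConstantThrottle(), inputs=['cam/image_array'],
    #           outputs=['angle', 'throttle'])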
#Initialize car
V = dk.vehicle.Vehicle()
ctr = LocalWebController()
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['angle', 'throttle', 'user/mode', 'recording'],
threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['throttle'], outputs=['throttle'])
drive_train = None
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
drive_train = dict()
drive_train['steering'] = steering
drive_train['throttle'] = throttle
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
drive_train = RoboHATDriver(cfg)
V.add(drive_train, inputs=['angle', 'throttle'])
ctr.drive_train = drive_train
ctr.drive_train_type = cfg.DRIVE_TRAIN_TYPE
class ShowHowTo:
def __init__(self):
print(f"Go to http://{gethostname()}.local:8887/calibrate to calibrate ")
def run(self):
pass
V.add(ShowHowTo())
    #run the vehicle loop at cfg.DRIVE_LOOP_HZ; stops after cfg.MAX_LOOPS iterations if set
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config()
if args['drive']:
drive(cfg)
|
the-stack_106_28279 | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Pauli X (bit-flip) gate.
Author: Andrew Cross
"""
from qiskit import QuantumRegister
from qiskit import QuantumCircuit
from qiskit import Gate
from qiskit import CompositeGate
from qiskit import InstructionSet
from qiskit.extensions.standard import header
class XGate(Gate):
"""Pauli X (bit-flip) gate."""
def __init__(self, qubit, circ=None):
"""Create new X gate."""
super(XGate, self).__init__("x", [], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
return self._qasmif("x %s[%d];" % (qubit[0].name, qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.x(self.arg[0]))
def x(self, q):
"""Apply X to q."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.x((q, j)))
return gs
else:
self._check_qubit(q)
return self._attach(XGate(q, self))
QuantumCircuit.x = x
CompositeGate.x = x
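# Usage sketch against this legacy qiskit API (register/qubit addressing as
# expected by x() above; constructor signatures may differ in other versions):
#
#     q = QuantumRegister("q", 2)
#     circ = QuantumCircuit(q)
#     circ.x(q)         # X on every qubit in the register
#     circ.x((q, 0))    # X on a single qubit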
|
the-stack_106_28284 | import copy
from typing import List
import torch
from sympy import simplify_logic
from entropy_lens.logic.metrics import test_explanation
from entropy_lens.logic.utils import replace_names
from entropy_lens.nn import Conceptizator
from entropy_lens.nn.logic import EntropyLinear
def explain_class(model: torch.nn.Module, x, y1h, x_val: torch.Tensor, y_val1h: torch.Tensor,
target_class: int, max_minterm_complexity: int = None, topk_explanations: int = 3,
max_accuracy: bool = False, concept_names: List = None) -> [str, str]:
"""
    Generate a global explanation for a target class by aggregating local explanations.
:param model: pytorch model
:param x: input samples to extract logic formulas.
:param y1h: target labels to extract logic formulas (MUST be one-hot encoded).
:param x_val: input samples to validate logic formulas.
:param y_val1h: target labels to validate logic formulas (MUST be one-hot encoded).
:param target_class: target class.
:param max_minterm_complexity: maximum number of concepts per logic formula (per sample).
:param topk_explanations: number of local explanations to be combined.
:param max_accuracy: if True a formula is simplified only if the simplified formula gets 100% accuracy.
:param concept_names: list containing the names of the input concepts.
:return: Global explanation
"""
x_correct, y_correct1h = _get_correct_data(x, y1h, model, target_class)
if x_correct is None:
return None, None
activation = 'identity_bool'
feature_names = [f'feature{j:010}' for j in range(x_correct.size(1))]
conceptizator = Conceptizator(activation)
y_correct = conceptizator(y_correct1h[:, target_class])
y_val = conceptizator(y_val1h[:, target_class])
class_explanation = ''
class_explanation_raw = ''
for layer_id, module in enumerate(model.children()):
if isinstance(module, EntropyLinear):
local_explanations = []
local_explanations_accuracies = {}
local_explanations_raw = {}
# look at the "positive" rows of the truth table only
positive_samples = torch.nonzero(y_correct)
for positive_sample in positive_samples:
local_explanation, local_explanation_raw = _local_explanation(module, feature_names, positive_sample,
local_explanations_raw,
x_correct, y_correct1h,
target_class, max_accuracy,
max_minterm_complexity)
# test explanation accuracy
if local_explanation_raw not in local_explanations_accuracies:
accuracy, _ = test_explanation(local_explanation_raw, x_val, y_val1h, target_class)
local_explanations_accuracies[local_explanation_raw] = (local_explanation, accuracy)
if local_explanation and local_explanation_raw:
local_explanations_raw[local_explanation_raw] = local_explanation_raw
local_explanations.append(local_explanation)
# aggregate local explanations and replace concept names in the final formula
aggregated_explanation, best_acc = _aggregate_explanations(local_explanations_accuracies,
topk_explanations,
target_class, x_val, y_val1h)
class_explanation_raw = str(aggregated_explanation)
class_explanation = class_explanation_raw
if concept_names is not None:
class_explanation = replace_names(class_explanation, concept_names)
break
return class_explanation[1:-1], class_explanation_raw
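# Usage sketch for explain_class (hypothetical tensors; the y tensors must be
# one-hot encoded and the model must contain an EntropyLinear layer):
#
#     formula, formula_raw = explain_class(model, x_train, y_train_1h,
#                                          x_val, y_val_1h, target_class=0,
#                                          topk_explanations=3,
#                                          concept_names=['c1', 'c2', 'c3'])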
def _simplify_formula(explanation: str, x: torch.Tensor, y: torch.Tensor, target_class: int, max_accuracy: bool) -> str:
"""
Simplify formula to a simpler one that is still coherent.
:param explanation: local formula to be simplified.
:param x: input data.
:param y: target labels (1D, categorical NOT one-hot encoded).
:param target_class: target class
:param max_accuracy: drop term only if it gets max accuracy
:return: Simplified formula
"""
base_accuracy, _ = test_explanation(explanation, x, y, target_class)
for term in explanation.split(' & '):
explanation_simplified = copy.deepcopy(explanation)
if explanation_simplified.endswith(f'{term}'):
explanation_simplified = explanation_simplified.replace(f' & {term}', '')
else:
explanation_simplified = explanation_simplified.replace(f'{term} & ', '')
if explanation_simplified:
accuracy, preds = test_explanation(explanation_simplified, x, y, target_class)
if (max_accuracy and accuracy == 1.) or (not max_accuracy and accuracy >= base_accuracy):
explanation = copy.deepcopy(explanation_simplified)
base_accuracy = accuracy
return explanation
def _aggregate_explanations(local_explanations_accuracy, topk_explanations, target_class, x, y):
"""
Sort explanations by accuracy and then aggregate explanations which increase the accuracy of the aggregated formula.
:param local_explanations_accuracy: dictionary of explanations and related accuracies.
:param topk_explanations: limits the number of explanations to be aggregated.
:param target_class: target class.
:param x: observations in validation set.
:param y: labels in validation set.
:return:
"""
    if len(local_explanations_accuracy) == 0:
        return '', 0.0
else:
# get the topk most accurate local explanations
local_explanations_sorted = sorted(local_explanations_accuracy.items(), key=lambda x: -x[1][1])[:topk_explanations]
explanations = []
best_accuracy = 0
best_explanation = ''
for explanation_raw, (explanation, accuracy) in local_explanations_sorted:
explanations.append(explanation)
# aggregate example-level explanations
aggregated_explanation = ' | '.join(explanations)
aggregated_explanation_simplified = simplify_logic(aggregated_explanation, 'dnf', force=True)
aggregated_explanation_simplified = f'({aggregated_explanation_simplified})'
if aggregated_explanation_simplified in ['', 'False', 'True', '(False)', '(True)']:
continue
accuracy, _ = test_explanation(aggregated_explanation_simplified, x, y, target_class)
if accuracy > best_accuracy:
best_accuracy = accuracy
best_explanation = aggregated_explanation_simplified
explanations = [best_explanation]
return best_explanation, best_accuracy
def _local_explanation(module, feature_names, neuron_id, neuron_explanations_raw,
c_validation, y_target, target_class, max_accuracy, max_minterm_complexity):
# explanation is the conjunction of non-pruned features
explanation_raw = ''
if max_minterm_complexity:
concepts_to_retain = torch.argsort(module.alpha[target_class], descending=True)[:max_minterm_complexity]
else:
non_pruned_concepts = module.concept_mask[target_class]
concepts_sorted = torch.argsort(module.alpha[target_class])
concepts_to_retain = concepts_sorted[non_pruned_concepts[concepts_sorted]]
for j in concepts_to_retain:
if feature_names[j] not in ['()', '']:
if explanation_raw:
explanation_raw += ' & '
if module.conceptizator.concepts[0][neuron_id, j] > module.conceptizator.threshold:
# if non_pruned_neurons[j] > 0:
explanation_raw += feature_names[j]
else:
explanation_raw += f'~{feature_names[j]}'
explanation_raw = str(explanation_raw)
if explanation_raw in ['', 'False', 'True', '(False)', '(True)']:
return None, None
simplify = True
if explanation_raw in neuron_explanations_raw:
explanation = neuron_explanations_raw[explanation_raw]
elif simplify:
explanation = _simplify_formula(explanation_raw, c_validation, y_target, target_class, max_accuracy)
else:
explanation = explanation_raw
if explanation in ['', 'False', 'True', '(False)', '(True)']:
return None, None
return explanation, explanation_raw
def _get_correct_data(x, y, model, target_class):
x_target = x[y[:, target_class] == 1]
y_target = y[y[:, target_class] == 1]
# get model's predictions
preds = model(x_target).squeeze(-1)
# identify samples correctly classified of the target class
correct_mask = y_target[:, target_class].eq(preds[:, target_class]>0.5)
if sum(correct_mask) < 2:
return None, None
x_target_correct = x_target[correct_mask]
y_target_correct = y_target[correct_mask]
# collapse samples having the same boolean values and class label different from the target class
x_reduced_opposite = x[y[:, target_class] != 1]
y_reduced_opposite = y[y[:, target_class] != 1]
preds_opposite = model(x_reduced_opposite).squeeze(-1)
# identify samples correctly classified of the opposite class
correct_mask = y_reduced_opposite[:, target_class].eq(preds_opposite[:, target_class]>0.5)
if sum(correct_mask) < 2:
return None, None
x_reduced_opposite_correct = x_reduced_opposite[correct_mask]
y_reduced_opposite_correct = y_reduced_opposite[correct_mask]
# select the subset of samples belonging to the target class
x_validation = torch.cat([x_reduced_opposite_correct, x_target_correct], dim=0)
y_validation = torch.cat([y_reduced_opposite_correct, y_target_correct], dim=0)
model.eval()
model(x_validation)
return x_validation, y_validation
|
the-stack_106_28286 | from django.apps import AppConfig
from django.conf import settings
from django.core import exceptions
from raven.contrib.django.models import initialize
class SenSysConfig(AppConfig):
name = 'sensys.contrib.django'
label = 'sensys_contrib_django'
verbose_name = 'SenSys'
def ready(self):
# step 1: check custom sensys settings
if not getattr(settings, 'SENTRY_CLIENT', False):
setattr(settings, 'SENTRY_CLIENT', 'sensys.contrib.django.DjangoSenSysClient')
if not getattr(settings, 'SENSYS_TAG', False):
raise exceptions.ImproperlyConfigured('SENSYS_TAG setting is not define.')
# step 2: initialize raven client
initialize()
|
the-stack_106_28288 | """Support for tracking consumption over given periods of time."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from .const import (
DOMAIN,
SIGNAL_RESET_METER,
METER_TYPES,
CONF_METER_TYPE,
CONF_METER_OFFSET,
CONF_METER_NET_CONSUMPTION,
CONF_SOURCE_SENSOR,
CONF_TARIFF_ENTITY,
CONF_TARIFF,
CONF_TARIFFS,
CONF_METER,
DATA_UTILITY,
SERVICE_RESET,
SERVICE_SELECT_TARIFF,
SERVICE_SELECT_NEXT_TARIFF,
ATTR_TARIFF,
)
_LOGGER = logging.getLogger(__name__)
TARIFF_ICON = "mdi:clock-outline"
ATTR_TARIFFS = "tariffs"
DEFAULT_OFFSET = timedelta(hours=0)
METER_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_METER_TYPE): vol.In(METER_TYPES),
vol.Optional(CONF_METER_OFFSET, default=DEFAULT_OFFSET): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_METER_NET_CONSUMPTION, default=False): cv.boolean,
vol.Optional(CONF_TARIFFS, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.slug: METER_CONFIG_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
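# Example configuration.yaml entry matching the schema above (illustrative;
# the exact option key spellings come from the CONF_* constants in .const):
#
#     utility_meter:
#       daily_energy:
#         source: sensor.energy_consumption
#         cycle: daily
#         tariffs:
#           - peak
#           - offpeak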
async def async_setup(hass, config):
"""Set up an Utility Meter."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[DATA_UTILITY] = {}
register_services = False
for meter, conf in config.get(DOMAIN).items():
_LOGGER.debug("Setup %s.%s", DOMAIN, meter)
hass.data[DATA_UTILITY][meter] = conf
if not conf[CONF_TARIFFS]:
# only one entity is required
hass.async_create_task(
discovery.async_load_platform(
hass,
SENSOR_DOMAIN,
DOMAIN,
[{CONF_METER: meter, CONF_NAME: meter}],
config,
)
)
else:
# create tariff selection
await component.async_add_entities(
[TariffSelect(meter, list(conf[CONF_TARIFFS]))]
)
hass.data[DATA_UTILITY][meter][CONF_TARIFF_ENTITY] = "{}.{}".format(
DOMAIN, meter
)
# add one meter for each tariff
tariff_confs = []
for tariff in conf[CONF_TARIFFS]:
tariff_confs.append(
{
CONF_METER: meter,
CONF_NAME: f"{meter} {tariff}",
CONF_TARIFF: tariff,
}
)
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, tariff_confs, config
)
)
register_services = True
if register_services:
component.async_register_entity_service(SERVICE_RESET, {}, "async_reset_meters")
component.async_register_entity_service(
SERVICE_SELECT_TARIFF,
{vol.Required(ATTR_TARIFF): cv.string},
"async_select_tariff",
)
component.async_register_entity_service(
SERVICE_SELECT_NEXT_TARIFF, {}, "async_next_tariff"
)
return True
class TariffSelect(RestoreEntity):
"""Representation of a Tariff selector."""
def __init__(self, name, tariffs):
"""Initialize a tariff selector."""
self._name = name
self._current_tariff = None
self._tariffs = tariffs
self._icon = TARIFF_ICON
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
if self._current_tariff is not None:
return
state = await self.async_get_last_state()
if not state or state.state not in self._tariffs:
self._current_tariff = self._tariffs[0]
else:
self._current_tariff = state.state
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_tariff
@property
def state_attributes(self):
"""Return the state attributes."""
return {ATTR_TARIFFS: self._tariffs}
async def async_reset_meters(self):
"""Reset all sensors of this meter."""
_LOGGER.debug("reset meter %s", self.entity_id)
async_dispatcher_send(self.hass, SIGNAL_RESET_METER, self.entity_id)
async def async_select_tariff(self, tariff):
"""Select new option."""
if tariff not in self._tariffs:
_LOGGER.warning(
"Invalid tariff: %s (possible tariffs: %s)",
tariff,
", ".join(self._tariffs),
)
return
self._current_tariff = tariff
await self.async_update_ha_state()
async def async_next_tariff(self):
"""Offset current index."""
current_index = self._tariffs.index(self._current_tariff)
new_index = (current_index + 1) % len(self._tariffs)
self._current_tariff = self._tariffs[new_index]
await self.async_update_ha_state()
|
the-stack_106_28289 | #!/usr/bin/env python3
import typer
from typing import Optional
from rich import print
import os
import tempfile
import subprocess
from rich.markup import escape
from rich.console import Console
import random
c = Console(highlight=False)
def print(s):
c.print(s)
def run(s: Optional[str] = None, race: bool = True) -> bool:
success = True
env = os.environ.copy()
env["DEBUG"] = "true"
    cmd = ['go', 'test', '-v', '-count=1']
    if race:
        cmd.append('-race')
    if s is not None:
        cmd += ['-run', s]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
output = ""
for line in iter(p.stdout.readline, b''):
out = line.decode('utf-8')
output += out
out = out.strip("\n")
if "INFO" in out:
continue
if "PASS" in out:
print(f"[green]{escape(out)}[/green]")
elif "FAIL" in out:
print(f"[red]{escape(out)}[/red]")
success = False
else:
print(escape(out))
if not success:
fn = f"{s + '-' if s is not None else ''}fail-{random.randint(1,10000)}"
print(f"[magenta]saving failed log file to {fn}")
with open(fn, "w") as f:
f.write(output)
return success
def main(n: int, testname: Optional[str] = None, race: bool = True):
success = True
for i in range(n):
print(f"[yellow]Running test {i+1} of {n}[/yellow]")
if testname is not None:
success = success and run(testname, race=race)
else:
success = success and run(race=race)
if not success:
break
if success:
print("[green bold]YAYAYAY EVERYTHING WORKS")
typer.run(main)
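# CLI usage sketch (typer maps main's signature to arguments/options), e.g.:
#     ./this_script.py 10 --testname TestBasicAgree2B --no-race
# where the script name is whatever this file is saved as.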
|
the-stack_106_28291 | # Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Abstract syntax tree (AST) transformer to trace function calls.
"""
from __future__ import absolute_import
import ast
from traitlets import HasTraits, Any
from .ast_util import ast_has_starred, get_single_target, \
to_attribute, to_call, to_name, to_name_constant, to_list, to_tuple
class ASTTracer(HasTraits):
""" Trace function calls and variable gets and sets by AST rewriting.
This class should be used with `ASTTraceTransformer`. It is very low-level
and should be supplemented with additional logic to be useful. The `Tracer`
class in this subpackage shows how to do this in an event-based way.
"""
def trace_function(self, function, nargs):
""" Called after function object (not function call!) is evaluated.
"""
return self._unbox(self._trace_function(function, nargs))
def trace_argument(self, arg_value, arg_name=None, nstars=0):
""" Called after function argument is evaluated.
"""
return self._unbox(self._trace_argument(arg_value, arg_name, nstars))
def trace_return(self, return_value, multiple_values=False):
""" Called after function returns.
"""
return self._unbox(self._trace_return(return_value, multiple_values))
def trace_access(self, name, value):
""" Called after a variable is accessed.
"""
return self._unbox(self._trace_access(name, value))
def trace_assign(self, name, value):
""" Called before a variable is assigned.
"""
return self._unbox(self._trace_assign(name, value))
def trace_delete(self, name):
""" Called before a variable is deleted.
"""
return self._trace_delete(name)
def _trace_function(self, function, args):
""" Called after function object is evaluated.
May be reimplemented in subclass.
"""
return function
def _trace_argument(self, arg_value, arg_name=None, nstars=0):
""" Called after function argument is evaluated.
May be reimplemented in subclass.
"""
return arg_value
def _trace_return(self, return_value, multiple_values=False):
""" Called after function returns.
        May be reimplemented in subclass.
"""
return return_value
def _trace_access(self, name, value):
""" Called after a variable is accessed.
May be reimplemented in subclass.
"""
return value
def _trace_assign(self, name, value):
""" Called before a variable is assigned.
May be reimplemented in subclass.
"""
return value
def _trace_delete(self, name):
""" Called before a variable is deleted.
May be reimplemented in subclass.
"""
def _unbox(self, x):
""" Unbox a value, if it is boxed.
"""
return x.value if isinstance(x, BoxedValue) else x
class ASTTraceTransformer(ast.NodeTransformer):
""" Rewrite AST to trace function calls and variable gets and sets.
"""
def __init__(self, tracer):
super(ASTTraceTransformer, self).__init__()
self.tracer = to_name(tracer)
self._state = {} # Hack to pass state to immediate child node.
def tracer_method(self, method, private=False):
""" Make AST node for a method on the tracer.
"""
if private:
method = '_' + method
return to_attribute(self.tracer, method)
def visit(self, node):
""" Reimplemented to clear state on visit.
"""
self._state.clear()
return super(ASTTraceTransformer, self).visit(node)
def visit_with_state(self, node, **kwargs):
""" Visit node, after setting state for this (non-generic) visit only.
"""
self._state.clear()
self._state.update(kwargs)
return super(ASTTraceTransformer, self).visit(node)
def visit_Call(self, call):
""" Rewrite AST Call node with tracing.
Replaces function and method calls, e.g.
f(x,y,z=1)
with wrapped calls, e.g.
trace_return(trace_function(f)(
trace_argument(x), trace_argument(y), z=trace_argument(1,'z')))
The AST transformer allows boxed values (see `BoxedValue` type) to be
passed through compositions of trace calls via the '_trace_*' variants
of the `trace_*` methods. E.g., the `x` argument in the function call
f(x=g())
becomes
...(x=trace_argument(_trace_return(...), 'x'))
"""
boxed = self._state.get('boxed', False)
multiple_values = self._state.get('multiple_values', False)
func = self.visit(call.func)
# Visit positional and keyword arguments.
args = [ self.visit_argument(arg) for arg in call.args ]
keywords = [ ast.keyword(kw.arg, self.visit_argument(
kw.value, kw.arg, 2 if kw.arg is None else 0
)) for kw in call.keywords ]
nargs = len(args) + len(keywords)
# Handle *args and **kwargs in Python 3.4 and lower.
starargs, kwargs = None, None
if not ast_has_starred:
if call.starargs is not None:
starargs = self.visit_argument(call.starargs, nstars=1)
nargs += 1
if call.kwargs is not None:
kwargs = self.visit_argument(call.kwargs, nstars=2)
nargs += 1
return to_call(
self.tracer_method('trace_return', private=boxed), [
to_call(
to_call(self.tracer_method('trace_function'), [
func, ast.Num(nargs)
]),
args, keywords, starargs, kwargs
),
to_name_constant(multiple_values),
])
def visit_argument(self, arg_value, arg_name=None, nstars=0):
""" Rewrite AST node appearing as function argument.
"""
# Unpack starred expression in Python 3.5+.
starred = ast_has_starred and isinstance(arg_value, ast.Starred)
if starred:
arg_value = arg_value.value
nstars = 1
# Create new call.
args = [ self.visit_with_state(arg_value, boxed=True) ]
if arg_name:
args += [ ast.Str(arg_name) ]
keywords = []
if nstars:
keywords += [ ast.keyword('nstars', ast.Num(nstars)) ]
call = to_call(self.tracer_method('trace_argument'), args, keywords)
# Repack starred expression in Python 3.5+.
if starred:
call = ast.Starred(call, ast.Load())
return call
def visit_Name(self, name):
""" Rewrite AST Name node with tracing.
Replaces variable accesses, e.g. `x`, with wrapped accesses, e.g.
trace_access('x', x)
"""
if isinstance(name.ctx, ast.Load):
boxed = self._state.get('boxed', False)
return to_call(self.tracer_method('trace_access', private=boxed), [
ast.Str(name.id),
name,
])
return name
def visit_Assign(self, node):
""" Rewrite AST Assign node with tracing.
Replaces variable assignments, e.g.
x, y = f()
with wrapped assignments, e.g.
x, y = trace_assign(('x','y'), f())
"""
target = get_single_target(node)
is_compound = not isinstance(target, ast.Name)
node.value = to_call(self.tracer_method('trace_assign'), [
self.target_to_literal(target),
self.visit_with_state(node.value,
boxed=True, multiple_values=is_compound),
])
return node
def target_to_literal(self, node):
""" Convert assignment target to AST literal node.
"""
if isinstance(node, ast.Name):
return ast.Str(node.id)
elif isinstance(node, ast.Tuple):
return to_tuple(map(self.target_to_literal, node.elts))
        elif isinstance(node, ast.List):
return to_list(map(self.target_to_literal, node.elts))
else:
raise TypeError("Unsupported assignment target %s" % node)
def visit_Delete(self, node):
""" Rewrite AST Delete node with tracing.
Replaces variable deletions, e.g., replaces `del x` with
trace_delete('x')
del x
"""
target = get_single_target(node)
if isinstance(target, ast.Name):
args = [ ast.Str(target.id) ]
return [
ast.Expr(to_call(self.tracer_method('trace_delete'), args)),
node,
]
return node
class BoxedValue(HasTraits):
""" A boxed value.
Boxed values can be to pass extra data, not contained in the original
program, between tracer callbacks. This is useful for connecting function
arguments to the function call, for example.
Note that the value in the box may have any type, not necessarily primitive.
"""
value = Any() |
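# Usage sketch (assumes the instrumented globals expose the tracer under the
# name passed to the transformer; names and values here are illustrative):
#
#     tracer = ASTTracer()
#     tree = ast.parse("y = f(x)")
#     tree = ast.fix_missing_locations(ASTTraceTransformer('__tracer').visit(tree))
#     exec(compile(tree, '<traced>', 'exec'),
#          {'__tracer': tracer, 'f': abs, 'x': -1})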
the-stack_106_28293 | #!/usr/bin/env python
from matplotlib import pyplot as plt
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import random_split
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import time
import pandas as pd
from customDataset import HCPanatDataset, CropBorders3D, SelectSagittalSlices3D, training_loop_val, validate
torch.set_printoptions(edgeitems=2)
torch.manual_seed(0)
#filenames
#src_dir = '../data/HCP-anat-data'
src_dir = '../data/HCP-anat'
img_dir = src_dir + '/images-three-classes/'
target_file = src_dir + '/annotations-three-classes.csv'
dataset = HCPanatDataset(csv_file=target_file, root_dir=img_dir)
#hyperparameters
n_crop = 5
n_sagittal = 20
perc_train = 0.85
n_epochs = 25
batch_size = 4
learning_rate = 1e-3
#apply some transformation to the data (crop)
transformed_dataset = HCPanatDataset(
csv_file=target_file,
root_dir=img_dir,
transform=transforms.Compose([
CropBorders3D(n_crop)]))
#check dimensions
t1, _ = transformed_dataset[0]
print("Shape of one image after cropping %i slices at the borders:" %n_crop)
print(t1.shape)
#apply some transformation to the data (crop and select axial slices)
transformed_dataset = HCPanatDataset(
csv_file=target_file,
root_dir=img_dir,
transform=transforms.Compose([
CropBorders3D(n_crop),
SelectSagittalSlices3D(n_sagittal)]))
#check dimensions
t1, _ = transformed_dataset[0]
print("Shape of one image after crop and selection of %i sagittal slices:" %n_sagittal)
print(t1.shape)
#visualize an example of T1 cropped
plt.figure()
m=np.int(t1.shape[0]/2)
im=plt.imshow(t1[m,:,:])
plt.colorbar(im)
plt.savefig('image_sample.png')
#compute the mean and std of the data
max_dim = len(t1.shape) #concatenating dimension
imgs = np.stack([img for img, _ in transformed_dataset], axis=max_dim)
mean = np.mean(imgs)
std = np.std(imgs)
print("Dataset mean = %f, std = %f" % (mean, std))
#normalize the data
normalized_dataset = HCPanatDataset(
csv_file=target_file,
root_dir=img_dir,
transform=transforms.Compose([
CropBorders3D(n_crop),
SelectSagittalSlices3D(n_sagittal),
transforms.ToTensor(),
transforms.Normalize(mean,std)]))
#split the dataset into training and test sets with torch.utils.data.random_split
N = len(normalized_dataset)
train_set, test_set = random_split(normalized_dataset, [int(perc_train*N), N-int(perc_train*N)])
print("Total number of images: %i" %N)
print("Number of training images: %i" %(perc_train*N))
print("Number of test images: %i" %(N-int(perc_train*N)))
#infer number of features
n_in = imgs.shape[0] * imgs.shape[1] * imgs.shape[2] #number of input features
labels = pd.read_csv(target_file)['label']
n_out = len(np.unique(labels)) #number of output features, i.e. number of classes
print("The number of input feature is: %i" %n_in)
print("The number of output feature is: %i" %n_out)
#assuming that we are on a CUDA machine, this should print a CUDA device:
device = (torch.device('cuda') if torch.cuda.is_available()
else torch.device('cpu'))
print(f"Training on device {device}.")
#increase (even more) the number of layers and change the loss to CrossEntropy
seq_model_large = nn.Sequential(
nn.Linear(n_in, 512),
nn.Tanh(),
nn.Linear(512, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, n_out))
seq_model_large = seq_model_large.to(device=device)
optimizer = optim.SGD(seq_model_large.parameters(), lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()
#split the datasets into batches
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True)
#training and showing also validation loss
t0 = time.time()
loss_vector, loss_val_vector = training_loop_val(
model = seq_model_large,
train_loader = train_loader,
test_loader = test_loader,
criterion = loss_fn,
optimizer = optimizer,
n_epochs = n_epochs)
print("Training time = %f seconds" %(time.time()-t0))
#plot training and validation loss
plt.figure()
x_axis = np.arange(n_epochs)
plt.plot(x_axis, loss_vector, 'r--', label='loss train')
plt.plot(x_axis, loss_val_vector, 'g--', label='loss val')
plt.ylim(0, 1)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("loss")
plt.savefig('training_validation_losses.png')
#compute accuracy in training and validation
validate(seq_model_large, train_loader, test_loader)
numel_list = [p.numel()
for p in seq_model_large.parameters()
if p.requires_grad == True]
print(sum(numel_list), numel_list)
"""w.r.t. the previous example that uses the entire 3D volumes, here we have 72M parameters instead of 468M parameters, even though we used a network with one more hidden layer (Linear+Tanh)! The next step is to use CNNs."""
|
the-stack_106_28294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Tests for the :py:class:`luma.core.interface.serial.bitbang` class.
"""
from unittest.mock import Mock, call
from luma.core.interface.serial import bitbang
import luma.core.error
import pytest
from helpers import rpi_gpio_missing
gpio = Mock(unsafe=True)
def setup_function(function):
gpio.reset_mock()
gpio.BCM = 1
gpio.RST = 2
gpio.DC = 3
gpio.OUT = 4
gpio.HIGH = 5
gpio.LOW = 6
def test_data():
data = (0xFF, 0x0F, 0x00)
serial = bitbang(gpio=gpio, SCLK=13, SDA=14, CE=15, DC=16, RST=17)
serial.data(data)
reset = [call(17, gpio.LOW), call(17, gpio.HIGH)]
clock = [call(13, gpio.HIGH), call(13, gpio.LOW)]
data = lambda x: call(14, 0x80 if x == gpio.HIGH else 0x00)
ce = lambda x: [call(15, x)]
dc = lambda x: [call(16, x)]
calls = reset + \
dc(gpio.HIGH) + \
ce(gpio.LOW) + \
(([data(gpio.HIGH)] + clock) * 8) + \
(([data(gpio.LOW)] + clock) * 4) + \
(([data(gpio.HIGH)] + clock) * 4) + \
(([data(gpio.LOW)] + clock) * 8) + \
ce(gpio.HIGH)
gpio.output.assert_has_calls(calls)
def test_cleanup():
serial = bitbang(gpio=gpio)
serial._managed = True
serial.cleanup()
gpio.cleanup.assert_called_once_with()
def test_unsupported_gpio_platform():
try:
bitbang()
except luma.core.error.UnsupportedPlatform as ex:
assert str(ex) == 'GPIO access not available'
except ImportError:
pytest.skip(rpi_gpio_missing)
|
the-stack_106_28295 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Instrument `django`_ to trace Django applications.
.. _django: https://pypi.org/project/django/
Usage
-----
.. code:: python
from opentelemetry.instrumentation.django import DjangoInstrumentor
DjangoInstrumentor().instrument()
Configuration
-------------
Exclude lists
*************
To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_DJANGO_EXCLUDED_URLS`` with comma delimited regexes representing which URLs to exclude.
For example,
::
export OTEL_PYTHON_DJANGO_EXCLUDED_URLS="client/.*/info,healthcheck"
will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.
Request attributes
********************
To extract certain attributes from Django's request object and use them as span attributes, set the environment variable ``OTEL_PYTHON_DJANGO_TRACED_REQUEST_ATTRS`` to a comma
delimited list of request attribute names.
For example,
::
export OTEL_PYTHON_DJANGO_TRACED_REQUEST_ATTRS='path_info,content_type'
will extract path_info and content_type attributes from every traced request and add them as span attritbues.
Django Request object reference: https://docs.djangoproject.com/en/3.1/ref/request-response/#attributes
Request and Response hooks
***************************
The instrumentation supports specifying request and response hooks. These are functions that get called back by the instrumentation right after a Span is created for a request
and right before the span is finished while processing a response. The hooks can be configured as follows:
.. code:: python
def request_hook(span, request):
pass
def response_hook(span, request, response):
pass
DjangoInstrumentation().instrument(request_hook=request_hook, response_hook=response_hook)
"""
from logging import getLogger
from os import environ
from django.conf import settings
from opentelemetry.instrumentation.django.environment_variables import (
OTEL_PYTHON_DJANGO_INSTRUMENT,
)
from opentelemetry.instrumentation.django.middleware import _DjangoMiddleware
from opentelemetry.instrumentation.django.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
_logger = getLogger(__name__)
class DjangoInstrumentor(BaseInstrumentor):
"""An instrumentor for Django
See `BaseInstrumentor`
"""
_opentelemetry_middleware = ".".join(
[_DjangoMiddleware.__module__, _DjangoMiddleware.__qualname__]
)
def _instrument(self, **kwargs):
# FIXME this is probably a pattern that will show up in the rest of the
# ext. Find a better way of implementing this.
if environ.get(OTEL_PYTHON_DJANGO_INSTRUMENT) == "False":
return
_DjangoMiddleware._otel_request_hook = kwargs.pop("request_hook", None)
_DjangoMiddleware._otel_response_hook = kwargs.pop(
"response_hook", None
)
# This can not be solved, but is an inherent problem of this approach:
# the order of middleware entries matters, and here you have no control
# on that:
# https://docs.djangoproject.com/en/3.0/topics/http/middleware/#activating-middleware
# https://docs.djangoproject.com/en/3.0/ref/middleware/#middleware-ordering
settings_middleware = getattr(settings, "MIDDLEWARE", [])
# Django allows to specify middlewares as a tuple, so we convert this tuple to a
# list, otherwise we wouldn't be able to call append/remove
if isinstance(settings_middleware, tuple):
settings_middleware = list(settings_middleware)
settings_middleware.insert(0, self._opentelemetry_middleware)
setattr(settings, "MIDDLEWARE", settings_middleware)
def _uninstrument(self, **kwargs):
settings_middleware = getattr(settings, "MIDDLEWARE", None)
# FIXME This is starting to smell like trouble. We have 2 mechanisms
# that may make this condition be True, one implemented in
# BaseInstrumentor and another one implemented in _instrument. Both
# stop _instrument from running and thus, settings_middleware not being
# set.
if settings_middleware is None or (
self._opentelemetry_middleware not in settings_middleware
):
return
settings_middleware.remove(self._opentelemetry_middleware)
setattr(settings, "MIDDLEWARE", settings_middleware)
|
the-stack_106_28299 | import asyncio
import secrets
from jotbox import Jotbox, Payload, JWTDecodeError
from jotbox.whitelist.redis import RedisWhitelist
# Define the payload model
class AccessPayload(Payload):
user_id: int
# Create our Jotbox instance with some settings
jot = Jotbox[AccessPayload](
encode_key=secrets.token_hex(),
payload_type=AccessPayload,
leeway=10,
expires_in=7200, # Expire tokens after 2 hours (optional)
# Whitelist is optional, skip this if you don't need revoke support
idle_timeout=600, # Revoke token after 10 minutes without use
whitelist=RedisWhitelist("redis://localhost"),
)
async def run():
# Create a token
token = await jot.create_token(user_id=42)
print(token.token) # the encoded token as string
# >> eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9.eyJqdGkiOiJkMDFlYmVmNjlk...
print(repr(token.payload)) # The payload object
# >> AccessPayload(jti=UUID('d682eabf-...'), iat=1593638317, user_id=42)
# Verify the encoded token
payload = await jot.verified_payload(token.token)
print(payload)
# >> AccessPayload(jti=UUID('d682eabf-...'), iat=1593638317, user_id=42)
# revoke the token (logout)
await jot.revoke_payload(payload)
try:
await jot.verified_payload(token.token)
except JWTDecodeError as e:
print(repr(e))
# >> RevokedTokenError('Token ID d682eabf-... has been revoked')
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
|
the-stack_106_28300 | #! /usr/bin/env python
import argparse
from string import Template
import settings
def make_igvsession(igv_ed_umcu, igv_ed_hc, bam, vcf_hc, sample_id, vcf_SNV, axis, statistic):
template_file = Template(open(settings.template_xml).read())
new_session = "{0}_{1}_{2}_igv.xml".format(sample_id, statistic, args.runid)
igv_ed_hc_test = "{0}_{1}_test".format(igv_ed_hc, statistic)
igv_ed_umcu_test = "{0}_{1}_test".format(igv_ed_umcu, statistic)
bam_coverage = "{0}_coverage".format(bam)
bam_junctions = "{0}_junctions".format(bam)
min_axis, mid_axis, max_axis = axis
ratioid_UMCU = "{0}_UMCU".format(statistic)
ratioid_HC = "{0}_HC".format(statistic)
substitute_dic = {'session_var':new_session, 'igv_ed_umcu':igv_ed_umcu, 'igv_ed_hc':igv_ed_hc,
'bam':bam, 'vcf_hc':vcf_hc, 'sample_id':sample_id, 'igv_ed_hc_test':igv_ed_hc_test,
'igv_ed_umcu_test':igv_ed_umcu_test, 'bam_coverage':bam_coverage, 'bam_junctions':bam_junctions,
'vcf_SNV':vcf_SNV, 'min_axis':min_axis, 'mid_axis':mid_axis, 'max_axis':max_axis,
'ratioid_UMCU':ratioid_UMCU, 'ratioid_HC':ratioid_HC
}
new_file = template_file.substitute(substitute_dic)
return new_file
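# The XML referenced by settings.template_xml is expected to use string.Template
# placeholders matching the keys of substitute_dic above, e.g. (illustrative):
#
#     <Resource path="${igv_ed_umcu}"/>
#     <Resource path="${bam}"/>
#     <DataRange minimum="${min_axis}" baseline="${mid_axis}" maximum="${max_axis}"/>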
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('bam', help='BAM file')
parser.add_argument('output', help='Output folder')
parser.add_argument('sampleid', help='Sample ID')
parser.add_argument('template', help='Full path to template XML')
parser.add_argument('refdate', help='Date of the used reference set')
parser.add_argument('runid', help='Run ID')
parser.add_argument('--pipeline', default='nf', choices=['nf', 'iap'], help='pipeline used for sample processing (nf = nexflow, IAP = illumina analysis pipeline')
parser.add_argument('--vcf_filename_suffix', help='suffix that was included in the VCF filename. Do not include spaces or underscores in suffix')
args = parser.parse_args()
igv_settings = settings.igv_settings
igv_extension = "ref.igv"
vcf_extension = "exome_calls.vcf"
if args.vcf_filename_suffix:
igv_extension = "{}_ref.igv".format(args.vcf_filename_suffix)
vcf_extension = "exome_calls_{}.vcf".format(args.vcf_filename_suffix)
igv_ed_umcu = "igv_tracks/UMCU_{0}_{1}_{2}_{3}".format(args.refdate, args.sampleid, args.runid, igv_extension)
igv_ed_hc = "igv_tracks/HC_{0}_{1}_{2}_{3}".format(args.refdate, args.sampleid, args.runid, igv_extension)
vcf_hc = "HC/HC_{0}_{1}_{2}_{3}".format(args.refdate, args.sampleid, args.runid, vcf_extension)
if args.pipeline == "iap": #For analysis based on IAP
bam_id = "../{0}/mapping/{1}".format(args.sampleid, args.bam)
vcf_SNV = "../single_sample_vcf/{0}.filtered_variants.vcf".format(args.sampleid)
elif args.pipeline == "nf": #For NF pipeline.
bam_id = "../bam_files/{0}.bam".format(args.sampleid)
vcf_SNV = "../single_sample_vcf/{0}_{1}.vcf".format(args.sampleid, args.runid)
for statistic in igv_settings:
write_file = open("{0}/{1}_{2}_{3}_igv.xml".format(args.output, args.sampleid, statistic, args.runid), "w")
write_file.write(make_igvsession(igv_ed_umcu, igv_ed_hc, bam_id, vcf_hc, args.sampleid, vcf_SNV, igv_settings[statistic], statistic))
write_file.close()
|
the-stack_106_28301 | import bs4
import requests
import os
import brotli
limit = 1290
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):].replace('/', '')
return text
def get_links(i):
url = f"http://23.95.221.108/page/{i}"
html = requests.get(url).text
soup = bs4.BeautifulSoup(html, 'html.parser')
arts = soup.findAll('article')
hrefs = [x.find('a').attrs['href'] for x in arts]
return [remove_prefix(x, 'https://it-eb.com/') for x in hrefs]
def get_book(path):
url = f"http://23.95.221.108/{path}"
return requests.get(url).text
def write(file_name, content):
fd = open(file_name, 'wb')
fd.write(content)
fd.flush()
fd.close()
def main():
dir_name = "../txt/"
if not os.path.exists(dir_name):
os.mkdir(dir_name)
for i in range(1, limit + 1):
for path in get_links(i):
file = f'{dir_name}{path}.txt.brotli'
if not os.path.exists(file):
print(file)
html = get_book(path)
bro = brotli.compress(html.encode(), brotli.MODE_TEXT)
write(file, bro)
main()
|
the-stack_106_28303 | from tradssat.tmpl.var import CharacterVar, FloatVar
cul_vars_PTSUB = {
CharacterVar('VAR#', 6, spc=0, info='Identification code or number for the specific cultivar.'),
CharacterVar('VAR-NAME', 16, header_fill='.', info='Name of cultivar.'),
CharacterVar('EXPNO', 5, miss='.', info='Number of experiments used for calibration.'),
CharacterVar(
'ECO#', 6, info='Ecotype code or this cultivar, points to the Ecotype in the ECO file (currently not used).'
),
FloatVar('G2', 5, 0, info='Leaf area expansion rate after tuber initiation (cm2/m2 d)'),
FloatVar('G3', 5, 1, info='Potential tuber growth rate (g/m2 d)'),
FloatVar('PD', 5, 1,
info='Index that supresses tuber growth during the period that immediately follows tuber induction'),
FloatVar('P2', 5, 1, info='Tuber initiation sensitivity to long photoperiods'),
FloatVar('TC', 5, 1, info='Upper critical temperature for tuber initiation (C)'),
}
eco_vars_PTSUB = {
CharacterVar('ECO#', 6, spc=0, info='Code for the ecotype to which a cultivar belongs (see *.cul file)'),
CharacterVar('ECONAME', 18, header_fill='.', info='Name of the ecotype, which is referenced from *.CUL file'),
FloatVar('RUE1', 5, 1, info='Radiation use efficiency, ISTAGE=1, g plant dry matter/MJ PAR'),
FloatVar('RUE2', 5, 1, info='Radiation use efficiency, ISTAGE>1, g plant dry matter/MJ PAR')
}
|
the-stack_106_28304 | from setuptools import find_packages
from setuptools import setup
package_name = 'ros2doctor'
setup(
name=package_name,
version='0.18.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['ros2cli'],
zip_safe=True,
author='Claire Wang',
author_email='[email protected]',
maintainer='Aditya Pande, Audrow Nash, Michael Jeronimo',
maintainer_email='[email protected], [email protected], [email protected]', # noqa: E501
url='',
download_url='',
keywords=[],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
description='The doctor command for ROS 2 command line tools',
long_description="""\
The package provides a cli tool to check potential issues in a ROS 2 system""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'ros2cli.command': [
'doctor = ros2doctor.command.doctor:DoctorCommand',
'wtf = ros2doctor.command.doctor:WtfCommand',
],
'ros2doctor.checks': [
'PlatformCheck = ros2doctor.api.platform:PlatformCheck',
'NetworkCheck = ros2doctor.api.network:NetworkCheck',
'TopicCheck = ros2doctor.api.topic:TopicCheck',
'QoSCompatibilityCheck = ros2doctor.api.qos_compatibility:QoSCompatibilityCheck',
'PackageCheck = ros2doctor.api.package:PackageCheck',
],
'ros2doctor.report': [
'PlatformReport = ros2doctor.api.platform:PlatformReport',
'RosdistroReport = ros2doctor.api.platform:RosdistroReport',
'NetworkReport = ros2doctor.api.network:NetworkReport',
'RMWReport = ros2doctor.api.rmw:RMWReport',
'TopicReport = ros2doctor.api.topic:TopicReport',
'QoSCompatibilityReport = ros2doctor.api.qos_compatibility:QoSCompatibilityReport',
'PackageReport = ros2doctor.api.package:PackageReport',
],
'ros2cli.extension_point': [
'ros2doctor.verb = ros2doctor.verb:VerbExtension',
],
'ros2doctor.verb': [
'hello = ros2doctor.verb.hello:HelloVerb'
]
}
)
|
the-stack_106_28306 | #!/usr/bin/env python3
import sys
import gzip
filename_fa = sys.argv[1]
f_fa = open(filename_fa, 'r')
if filename_fa.endswith('.gz'):
f_fa = gzip.open(filename_fa, 'rt')
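# Header format assumed by the parsing below (GENCODE protein-coding
# translation FASTA); an illustrative record line:
#   >ENSP00000493376.2|ENST00000641515.2|ENSG00000186092.7|OTTHUMG...|OTTHUMT...|OR4F5-202|OR4F5|326
# tokens[0] = protein id, tokens[2] = gene id,
# tokens[-2] = gene name, tokens[-1] = sequence length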
seq_list = dict()
seqlen_list = dict()
for line in f_fa:
if line.startswith('>'):
tmp_tokens = line.strip().lstrip('>').split('|')
tmp_gene_id = tmp_tokens[2]
tmp_prot_id = tmp_tokens[0]
tmp_gene_name = tmp_tokens[-2]
tmp_seqlen = int(tmp_tokens[-1])
tmp_species = 'Unknown'
if tmp_gene_id.startswith('ENSG0'):
tmp_species = 'HUMAN'
if tmp_gene_id.startswith('ENSMUSG0'):
tmp_species = 'MOUSE'
tmp_gene = '%s|%s' % (tmp_species, tmp_gene_name)
if tmp_gene not in seq_list:
seq_list[tmp_gene] = dict()
seqlen_list[tmp_gene] = dict()
seqlen_list[tmp_gene][tmp_prot_id] = tmp_seqlen
seq_list[tmp_gene][tmp_prot_id] = []
else:
seq_list[tmp_gene][tmp_prot_id].append(line.strip())
f_fa.close()
for tmp_gene in seq_list.keys():
tmp_prot_id_list = sorted(seqlen_list[tmp_gene].keys(),
key=seqlen_list[tmp_gene].get)
max_prot_id = tmp_prot_id_list[-1]
print(">GENCODE|%s|%s\n%s" %
(tmp_gene, max_prot_id, '\n'.join(seq_list[tmp_gene][max_prot_id])))
|